diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index 3805eb0a2..d2690110e 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -12,8 +12,9 @@ RUN apt-get clean -y && \ apt-get install -y zip # Set user permissions and directory -RUN (id -g ${BUILDER_GID} || groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP}) \ - && (id -u ${BUILDER_UID} || useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GID} -m elastic) \ +# Use getent instead of id to handle cross-platform UID/GID conflicts (macOS vs Linux) +RUN (getent group ${BUILDER_GID} || groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP}) \ + && (getent passwd ${BUILDER_UID} || useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GID} -m elastic) \ && mkdir -p /usr/src/elasticsearch-js \ && chown -R ${BUILDER_UID}:${BUILDER_GID} /usr/src/ diff --git a/.github/make.sh b/.github/make.sh index d8d9cc391..62b868fb8 100755 --- a/.github/make.sh +++ b/.github/make.sh @@ -2,6 +2,7 @@ # ------------------------------------------------------- # # # Build entry script for elasticsearch-js +# Cross-platform compatible (macOS & Linux) # # Must be called: ./.github/make.sh # @@ -22,7 +23,13 @@ # ------------------------------------------------------- # # Bootstrap # ------------------------------------------------------- # -script_path=$(dirname "$(realpath -s "$0")") +if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS doesn't support -s flag + script_path=$(dirname "$(realpath "$0")") +else + # Linux supports -s flag + script_path=$(dirname "$(realpath -s "$0")") +fi repo=$(realpath "$script_path/../") # shellcheck disable=SC1090 diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index 9f6c7db44..9c4586226 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,164 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class AsyncSearch { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'async_search.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'async_search.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + }, + 'async_search.status': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive' + ] + }, + 'async_search.submit': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] + } + } } /** * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. 
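Editorial note on the pattern introduced here: the per-method `acceptedPath`/`acceptedBody` arrays (and the `typesWithBodyKey` import) are replaced by a single `acceptedParams` registry keyed by endpoint name, declaring which request keys belong to the URL path, the body, and the query string. Body-less endpoints (`delete`, `get`, `status`) route every leftover key to the query string, while `submit` consults its `query` list and defaults unknown keys to the body; explicit `body` and `querystring` keys are now passed through untouched. A minimal sketch of the `submit`-style routing, with hypothetical standalone names (`EndpointSpec`, `splitParams`) used purely for illustration:

```ts
// Simplified, standalone re-statement of the routing loop in `submit` below;
// `EndpointSpec` and `splitParams` are illustrative names, not client API.
interface EndpointSpec {
  path: string[]
  body: string[]
  query: string[]
}

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (params: Record<string, any>, spec: EndpointSpec): {
  querystring: Record<string, any>
  body: Record<string, any> | undefined
} {
  // Explicit `querystring`/`body` keys are passed through as-is.
  const querystring: Record<string, any> = { ...(params.querystring ?? {}) }
  let body: Record<string, any> | undefined =
    params.body != null ? { ...params.body } : undefined

  for (const key in params) {
    if (spec.body.includes(key)) {
      body = body ?? {}
      body[key] = params[key] // declared body key
    } else if (spec.path.includes(key) || key === 'body' || key === 'querystring') {
      continue // path keys build the URL elsewhere; passthroughs already handled
    } else if (spec.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key] // declared or common query key
    } else {
      body = body ?? {}
      body[key] = params[key] // unknown keys default to the body
    }
  }
  return { querystring, body }
}
```

The passthrough keys give callers an escape hatch when a parameter name collides with a path key or is missing from the spec.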
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/async-search.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.AsyncSearchDeleteRequest | TB.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['async_search.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +199,31 @@ export default class AsyncSearch { * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/async-search.html | Elasticsearch API documentation} */ - async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> - async get> (this: That, params: T.AsyncSearchGetRequest | TB.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['async_search.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,18 +244,31 @@ export default class AsyncSearch { * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: * The user or API key that submitted the original async search request. * Users that have the `monitor` cluster privilege or greater privileges. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/async-search.html | Elasticsearch API documentation} */ - async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise - async status (this: That, params: T.AsyncSearchStatusRequest | TB.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['async_search.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -144,29 +289,34 @@ export default class AsyncSearch { * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. 
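A usage sketch of the lifecycle these four endpoints form (submit, poll status, fetch results, delete); the index pattern and query are placeholders, and response fields such as `id` and `is_running` are assumed from the 8.x typings:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Submit a search that may outlive the request, keeping results for later retrieval.
const submitted = await client.asyncSearch.submit({
  index: 'logs-*',                    // placeholder index pattern
  wait_for_completion_timeout: '2s',  // return early if not done within 2s
  keep_on_completion: true,           // keep results even if it finishes in time
  query: { match: { message: 'error' } }
})

// Poll the status endpoint until the search completes...
let status = await client.asyncSearch.status({ id: submitted.id! })
while (status.is_running) {
  await new Promise(resolve => setTimeout(resolve, 1000))
  status = await client.asyncSearch.status({ id: submitted.id! })
}

// ...then fetch the results and clean up the stored response.
const results = await client.asyncSearch.get({ id: submitted.id! })
console.log(results.response?.hits.hits)
await client.asyncSearch.delete({ id: submitted.id! })
```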
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/async-search.html | Elasticsearch API documentation} */ - async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> - async submit> (this: That, params?: T.AsyncSearchSubmitRequest | TB.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['async_search.submit'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? {} - // @ts-expect-error if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line - // @ts-expect-error querystring[key] = params[key] } else { // @ts-expect-error @@ -174,9 +324,15 @@ export default class AsyncSearch { } } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index bcd3260c6..613180209 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,90 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Autoscaling { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'autoscaling.delete_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'autoscaling.get_autoscaling_capacity': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.get_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.put_autoscaling_policy': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
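The registry above declares `policy` as the only body key for `autoscaling.put_autoscaling_policy`, and the implementation further below assigns it to the request body directly rather than spreading it into a wrapper object. A hedged usage sketch, reusing `client` from the earlier async search example; the policy name and decider settings are placeholders:

```ts
// Create or update a policy whose request body is the `policy` object itself.
await client.autoscaling.putAutoscalingPolicy({
  name: 'my-autoscaling-policy',  // placeholder policy name
  policy: {
    roles: ['data_hot'],          // node roles the policy applies to
    deciders: {
      fixed: {}                   // built-in fixed decider with default settings
    }
  }
})

// The stored policy can be fetched or removed by name.
const stored = await client.autoscaling.getAutoscalingPolicy({ name: 'my-autoscaling-policy' })
console.log(stored)
await client.autoscaling.deleteAutoscalingPolicy({ name: 'my-autoscaling-policy' })
```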
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/autoscaling-delete-autoscaling-policy.html | Elasticsearch API documentation} */ - async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise - async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest | TB.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,19 +125,32 @@ export default class Autoscaling { * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. 
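A sketch of reading that diagnostic output, assuming the 8.x response shape (a `policies` object keyed by policy name, each entry carrying `required_capacity` and `current_capacity`):

```ts
const capacity = await client.autoscaling.getAutoscalingCapacity()

// Assumed response shape: `policies` keyed by policy name, each entry with
// required/current capacity totals and per-decider diagnostics.
for (const [name, policy] of Object.entries(capacity.policies)) {
  console.log(
    name,
    'required storage:', policy.required_capacity.total.storage,
    'current storage:', policy.current_capacity.total.storage
  )
}
```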
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} */ - async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise - async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest | TB.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -110,18 +168,31 @@ export default class Autoscaling { * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/autoscaling-get-autoscaling-capacity.html | Elasticsearch API documentation} */ - async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise - async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest | TB.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -142,25 +213,35 @@ export default class Autoscaling { * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/autoscaling-put-autoscaling-policy.html | Elasticsearch API documentation} */ - async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise - async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest | TB.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['autoscaling.put_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index 392d0116d..7780afc42 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,71 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + bulk: { + path: [ + 'index' + ], + body: [ + 'operations' + ], + query: [ + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. 
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. 
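For this client specifically, the `operations` body key declared above carries the alternating action and source lines as an array, and the client serializes them to NDJSON. A minimal sketch that mirrors the cURL example following below:

```ts
const response = await client.bulk({
  index: 'test',               // default target for actions without _index
  refresh: 'wait_for',         // make the writes visible to search before resolving
  operations: [
    { index: { _id: '1' } },   // action line
    { field1: 'value1' },      // source line for the index action
    { delete: { _id: '2' } }   // delete actions take no source line
  ]
})

if (response.errors) {
  // A bulk request can partially fail, so inspect per-item results.
  for (const item of response.items) {
    console.log(item.index?.error ?? item.delete?.error)
  }
}
```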
**Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. * Ruby: Check out `Elasticsearch::Helpers::BulkHelper` **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-bulk.html | Elasticsearch API documentation} */ -export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise -export default async function BulkApi (this: That, params: T.BulkRequest | TB.BulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['operations'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.bulk + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/capabilities.ts b/src/api/api/capabilities.ts index 4d6a6a473..caec02ac3 100644 --- a/src/api/api/capabilities.ts +++ b/src/api/api/capabilities.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + capabilities: { + path: [], + body: [], + query: [] + } +} /** * Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported * @see {@link https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities | Elasticsearch API documentation} */ -export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> -export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise -export default async function CapabilitiesApi (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.capabilities + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 2dc44bf15..1a6d41916 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,368 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Cat { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cat.aliases': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'expand_wildcards', + 'local' + ] + }, + 'cat.allocation': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.component_templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.count': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's' + ] + }, + 'cat.fielddata': { + path: [ + 'fields' + ], + body: [], + query: [ + 'bytes', + 'fields', + 'h', + 's' + ] + }, + 'cat.health': { + path: [], + body: [], + query: [ + 'time', + 'ts', + 'h', + 's' + ] + }, + 'cat.help': { + path: [], + body: [], + query: [] + }, + 'cat.indices': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'time', + 'master_timeout', + 'h', + 's' + ] + }, + 'cat.master': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.ml_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'from', + 'size', + 'time' + ] + }, + 'cat.nodeattrs': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.nodes': { + path: [], + body: [], + query: [ + 'bytes', + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.pending_tasks': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout', + 'time' + ] + }, + 'cat.plugins': { + path: [], + body: [], + query: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + }, + 'cat.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'bytes', + 'detailed', + 'index', + 'h', + 's', + 'time' + ] + }, + 'cat.repositories': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.shards': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.snapshots': { + path: [ + 'repository' + ], + body: [], + query: [ + 'ignore_unavailable', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.tasks': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'time', + 'timeout', + 'wait_for_completion' + ] + }, + 'cat.templates': { + path: [ + 'name' + 
], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.thread_pool': { + path: [ + 'thread_pool_patterns' + ], + body: [], + query: [ + 'h', + 's', + 'time', + 'local', + 'master_timeout' + ] + }, + 'cat.transforms': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'h', + 's', + 'time', + 'size' + ] + } + } } /** * Get aliases. Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-alias.html | Elasticsearch API documentation} */ - async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise - async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.aliases'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -88,19 +410,32 @@ export default class Cat { * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-allocation.html | Elasticsearch API documentation} */ - async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise - async allocation (this: That, params?: T.CatAllocationRequest | TB.CatAllocationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.allocation'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -128,19 +463,32 @@ export default class Cat { * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-component-templates.html | Elasticsearch API documentation} */ - async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise - async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.component_templates'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -168,19 +516,32 @@ export default class Cat { * Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-count.html | Elasticsearch API documentation} */ - async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> - async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise - async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.count'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -208,19 +569,32 @@ export default class Cat { * Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-fielddata.html | Elasticsearch API documentation} */ - async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise - async fielddata (this: That, params?: T.CatFielddataRequest | TB.CatFielddataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields'] - const querystring: Record = {} - const body = undefined + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.fielddata'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -248,19 +622,32 @@ export default class Cat { * Get the cluster health status. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. 
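Because the cat endpoints return text formatted for humans, the `h` (columns) and `s` (sort) query keys declared in the registry above are the main knobs worth showing. A short usage sketch, reusing `client` from the earlier example; the index pattern is a placeholder:

```ts
// Narrow cat output to specific columns; `h` and `s` are declared in the
// `cat.health` and `cat.indices` specs above.
const health = await client.cat.health({ h: 'timestamp,status,node.total' })
console.log(health)

const indices = await client.cat.indices({
  index: 'logs-*',                    // placeholder index pattern
  h: 'index,docs.count,store.size',   // columns to include
  s: 'store.size:desc'                // sort by on-disk size, descending
})
console.log(indices)
```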
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-health.html | Elasticsearch API documentation} */ - async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> - async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise - async health (this: That, params?: T.CatHealthRequest | TB.CatHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.health'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -278,19 +665,32 @@ export default class Cat { * Get CAT help. Get help for the CAT APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat.html | Elasticsearch API documentation} */ - async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> - async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise - async help (this: That, params?: T.CatHelpRequest | TB.CatHelpRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.help'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -308,19 +708,32 @@ export default class Cat { * Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-indices.html | Elasticsearch API documentation} */ - async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise - async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.indices'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -348,19 +761,32 @@ export default class Cat { * Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
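Following the cat.indices note above: because its document counts come straight from Lucene, hidden nested documents included, pair it with the count API when an exact count matters. A usage sketch, reusing the `client` from the health sketch and a hypothetical index name:

// `docs.count` from cat.indices counts Lucene documents, nested ones included.
const indices = await client.cat.indices({ index: 'my-index', format: 'json' })
console.log(indices[0]['docs.count'])

// The count API returns the exact number of Elasticsearch documents.
const { count } = await client.count({ index: 'my-index' })
console.log(count)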
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-master.html | Elasticsearch API documentation} */ - async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> - async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise - async master (this: That, params?: T.CatMasterRequest | TB.CatMasterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.master'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -378,19 +804,32 @@ export default class Cat { * Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-dfanalytics.html | Elasticsearch API documentation} */ - async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -418,19 +857,32 @@ export default class Cat { * Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-datafeeds.html | Elasticsearch API documentation} */ - async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise - async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const querystring: Record = {} - const body = undefined + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_datafeeds'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -458,19 +910,32 @@ export default class Cat { * Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-anomaly-detectors.html | Elasticsearch API documentation} */ - async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise - async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const querystring: Record = {} - const body = undefined + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_jobs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -498,19 +963,32 @@ export default class Cat { * Get trained models. Get configuration and usage information about inference trained models. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-trained-model.html | Elasticsearch API documentation} */ - async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise - async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_trained_models'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -538,19 +1016,32 @@ export default class Cat { * Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-nodeattrs.html | Elasticsearch API documentation} */ - async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise - async nodeattrs (this: That, params?: T.CatNodeattrsRequest | TB.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.nodeattrs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -568,19 +1059,32 @@ export default class Cat { * Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-nodes.html | Elasticsearch API documentation} */ - async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise - async nodes (this: That, params?: T.CatNodesRequest | TB.CatNodesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.nodes'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -598,19 +1102,32 @@ export default class Cat { * Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-pending-tasks.html | Elasticsearch API documentation} */ - async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise - async pendingTasks (this: That, params?: T.CatPendingTasksRequest | TB.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.pending_tasks'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -628,19 +1145,32 @@ export default class Cat { * Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
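The nodes handler above also illustrates the new `querystring` escape hatch: a caller-supplied `querystring` object is spread into the final query string, and every other non-path key lands there too, so for a body-less endpoint the two calls below should produce the same request (sketch; reuses the `client` from the health sketch):

// Column selection via top-level keys...
await client.cat.nodes({ h: 'name,heap.percent', format: 'json' })

// ...or via an explicit querystring object; same resulting query string.
await client.cat.nodes({ querystring: { h: 'name,heap.percent', format: 'json' } })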
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-plugins.html | Elasticsearch API documentation} */ - async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise - async plugins (this: That, params?: T.CatPluginsRequest | TB.CatPluginsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.plugins'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -658,19 +1188,32 @@ export default class Cat { * Get shard recovery information. Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-recovery.html | Elasticsearch API documentation} */ - async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise - async recovery (this: That, params?: T.CatRecoveryRequest | TB.CatRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.recovery'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -698,19 +1241,32 @@ export default class Cat { * Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-repositories.html | Elasticsearch API documentation} */ - async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise - async repositories (this: That, params?: T.CatRepositoriesRequest | TB.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.repositories'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -728,19 +1284,32 @@ export default class Cat { * Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-segments.html | Elasticsearch API documentation} */ - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise - async segments (this: That, params?: T.CatSegmentsRequest | TB.CatSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.segments'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -768,19 +1337,32 @@ export default class Cat { * Get shard information. Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-shards.html | Elasticsearch API documentation} */ - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise - async shards (this: That, params?: T.CatShardsRequest | TB.CatShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.shards'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -808,19 +1390,32 @@ export default class Cat { * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-snapshots.html | Elasticsearch API documentation} */ - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise - async snapshots (this: That, params?: T.CatSnapshotsRequest | TB.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] - const querystring: Record = {} - const body = undefined + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.snapshots'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -848,19 +1443,32 @@ export default class Cat { * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-tasks.html | Elasticsearch API documentation} */ - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise - async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.tasks'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -878,19 +1486,32 @@ export default class Cat { * Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-templates.html | Elasticsearch API documentation} */ - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise - async templates (this: That, params?: T.CatTemplatesRequest | TB.CatTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.templates'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -918,19 +1539,32 @@ export default class Cat { * Get thread pool statistics. Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-thread-pool.html | Elasticsearch API documentation} */ - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise - async threadPool (this: That, params?: T.CatThreadPoolRequest | TB.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['thread_pool_patterns'] - const querystring: Record = {} - const body = undefined + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.thread_pool'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -958,19 +1592,32 @@ export default class Cat { * Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cat-transforms.html | Elasticsearch API documentation} */ - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise - async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cat.transforms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index e17b02a10..7023caac5 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,216 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ccr { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ccr.delete_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow': { + path: [ + 'index' + ], + body: [ + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings' + ], + query: [ + 'master_timeout', + 'wait_for_active_shards' + ] + }, + 'ccr.follow_info': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ccr.forget_follower': { + path: [ + 'index' + ], + body: [ + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster' + ], + query: [ + 'timeout' + ] + }, + 'ccr.get_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_follow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.put_auto_follow_pattern': { + path: [ + 'name' + ], + body: [ + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_follow': { + path: [ + 'index' + ], + body: [ + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ccr.unfollow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. 
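The `acceptedParams` registry assembled in the constructor above gives each endpoint a single source of truth for which request keys belong to the URL path, the request body, and the query string. A plausible reconstruction of the entry type behind the `Record` annotations (the exact alias emitted by the code generator may differ):

interface AcceptedParams {
  path: string[]  // keys interpolated into the URL, e.g. 'name' or 'index'
  body: string[]  // keys serialized into the request body
  query: string[] // endpoint-specific query-string keys
}

// i.e. roughly: acceptedParams: Record<string, AcceptedParams>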
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} */
-  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrDeleteAutoFollowPatternResponse>
-  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrDeleteAutoFollowPatternResponse, unknown>>
-  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrDeleteAutoFollowPatternResponse>
-  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest | TB.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrDeleteAutoFollowPatternResponse>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrDeleteAutoFollowPatternResponse, unknown>>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<T.CcrDeleteAutoFollowPatternResponse>
+  async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ccr.delete_auto_follow_pattern']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -80,20 +251,27 @@ export default class Ccr {
   * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-put-follow.html | Elasticsearch API documentation} */
-  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrFollowResponse>
-  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrFollowResponse, unknown>>
-  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<T.CcrFollowResponse>
-  async follow (this: That, params: T.CcrFollowRequest | TB.CcrFollowRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrFollowResponse>
+  async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrFollowResponse, unknown>>
+  async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise<T.CcrFollowResponse>
+  async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ccr.follow']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -103,9 +281,15 @@ export default class Ccr {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -124,18 +308,31 @@ export default class Ccr {
   * Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
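The follow handler above is the body-accepting variant of the routing loop: keys listed under `body` in the registry go to the request body, keys under `query` (or in `commonQueryParams`, i.e. `error_trace`, `filter_path`, `human`, `pretty`) go to the query string, and any unrecognized key now defaults into the body rather than the query string. A usage sketch with hypothetical index and cluster names, reusing the `client` from the earlier sketch:

await client.ccr.follow({
  index: 'follower-index',          // path parameter -> URL
  leader_index: 'leader-index',     // accepted body key -> request body
  remote_cluster: 'remote-cluster', // accepted body key -> request body
  wait_for_active_shards: 1,        // accepted query key -> query string
  human: true                       // common query param -> query string
})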
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-get-follow-info.html | Elasticsearch API documentation} */ - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise - async followInfo (this: That, params: T.CcrFollowInfoRequest | TB.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -156,18 +353,31 @@ export default class Ccr { * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-get-follow-stats.html | Elasticsearch API documentation} */ - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise - async followStats (this: That, params: T.CcrFollowStatsRequest | TB.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -188,20 +398,27 @@ export default class Ccr { * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-post-forget-follower.html | Elasticsearch API documentation} */ - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise - async forgetFollower (this: That, params: T.CcrForgetFollowerRequest | TB.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.forget_follower'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -211,9 +428,15 @@ export default class Ccr { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -232,19 +455,32 @@ export default class Ccr { * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} */ - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest | TB.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.get_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -272,18 +508,31 @@ export default class Ccr { * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} */ - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest | TB.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -304,18 +553,31 @@ export default class Ccr { * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. 
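 *
 * Illustrative call (editor's sketch; `client` is an assumed, configured Client instance and the index name is hypothetical):
 *
 * @example
 * await client.ccr.pauseFollow({ index: 'follower-index' })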
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-post-pause-follow.html | Elasticsearch API documentation} */ - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise - async pauseFollow (this: That, params: T.CcrPauseFollowRequest | TB.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_follow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -336,20 +598,27 @@ export default class Ccr { * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} */ - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest | TB.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.put_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -359,9 +628,15 @@ export default class Ccr { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -380,18 +655,31 @@ export default class Ccr { * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} */ - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise - async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest | TB.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.resume_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -412,20 +700,27 @@ export default class Ccr { * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-post-resume-follow.html | Elasticsearch API documentation} */ - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise - async resumeFollow (this: That, params: T.CcrResumeFollowRequest | TB.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.resume_follow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -435,9 +730,15 @@ export default class Ccr { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -456,19 +757,32 @@ export default class Ccr { * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-get-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.CcrStatsRequest | TB.CcrStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ccr.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -486,18 +800,31 @@ export default class Ccr { * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. > info > Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. 
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ccr-post-unfollow.html | Elasticsearch API documentation}
   */
-  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrUnfollowResponse>
-  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrUnfollowResponse, unknown>>
-  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<T.CcrUnfollowResponse>
-  async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CcrUnfollowResponse>
+  async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.CcrUnfollowResponse, unknown>>
+  async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<T.CcrUnfollowResponse>
+  async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ccr.unfollow']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts
index 766289790..2205ff234 100644
--- a/src/api/api/clear_scroll.ts
+++ b/src/api/api/clear_scroll.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 /* eslint-disable import/export */
@@ -35,27 +21,48 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  clear_scroll: {
+    path: [],
+    body: [
+      'scroll_id'
+    ],
+    query: []
+  }
+}

 /**
  * Clear a scrolling search. Clear the search context and results for a scrolling search.
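 *
 * Illustrative sketch (assumes a configured `client`; the scroll ID is a placeholder). Per the `acceptedParams` entry above, `scroll_id` is serialized into the request body:
 *
 * @example
 * await client.clearScroll({ scroll_id: 'my-scroll-id' })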
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clear-scroll-api.html | Elasticsearch API documentation}
 */
-export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClearScrollResponse>
-export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClearScrollResponse, unknown>>
-export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<T.ClearScrollResponse>
-export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest | TB.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
-  const acceptedBody: string[] = ['scroll_id']
-  const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClearScrollResponse>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClearScrollResponse, unknown>>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise<T.ClearScrollResponse>
+export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise<any> {
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.clear_scroll
+
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
   }

   params = params ?? {}
@@ -66,9 +73,15 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }

diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts
index 6f82f539e..54af607af 100644
--- a/src/api/api/close_point_in_time.ts
+++ b/src/api/api/close_point_in_time.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,48 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + close_point_in_time: { + path: [], + body: [ + 'id' + ], + query: [] + } +} /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/point-in-time-api.html | Elasticsearch API documentation} */ -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise -export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest | TB.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.close_point_in_time + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +72,15 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 54d0ed098..baf934b43 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,231 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Cluster { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cluster.allocation_explain': { + path: [], + body: [ + 'current_node', + 'index', + 'primary', + 'shard' + ], + query: [ + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + }, + 'cluster.delete_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'cluster.delete_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'master_timeout', + 'wait_for_removal' + ] + }, + 'cluster.exists_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'local' + ] + }, + 'cluster.get_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'settings_filter', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'cluster.get_settings': { + path: [], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.health': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + }, + 'cluster.info': { + path: [ + 'target' + ], + body: [], + query: [] + }, + 'cluster.pending_tasks': { + path: [], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'cluster.post_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.put_component_template': { + path: [ + 'name' + ], + body: [ + 'template', + 'version', + '_meta', + 'deprecated' + ], + query: [ + 'create', + 'cause', + 'master_timeout' + ] + }, + 'cluster.put_settings': { + path: [], + body: [ + 'persistent', + 'transient' + ], + query: [ + 'flat_settings', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.remote_info': { + path: [], + body: [], + query: [] + }, + 'cluster.reroute': { + path: [], + body: [ + 'commands' + ], + query: [ + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.state': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + }, + 'cluster.stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'include_remotes', + 'timeout' + ] + } + } } /** * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. 
This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-allocation-explain.html | Elasticsearch API documentation} */ - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise - async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest | TB.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.allocation_explain'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -72,9 +256,15 @@ export default class Cluster { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -90,18 +280,31 @@ export default class Cluster { * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
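 *
 * A minimal sketch (not part of the diff; assumes a configured `client` and a hypothetical template name):
 *
 * @example
 * await client.cluster.deleteComponentTemplate({ name: 'my-component-template' })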
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-component-template.html | Elasticsearch API documentation} */ - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise - async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest | TB.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -122,19 +325,32 @@ export default class Cluster { * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. 
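 *
 * Illustrative call (editor's sketch; assumes a configured `client`). `wait_for_removal` is a query parameter:
 *
 * @example
 * await client.cluster.deleteVotingConfigExclusions({ wait_for_removal: false })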
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} */ - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise - async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest | TB.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.delete_voting_config_exclusions'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -152,18 +368,31 @@ export default class Cluster { * Check component templates. Returns information about whether a particular component template exists. 
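 *
 * A usage sketch (assumptions: a configured `client` and a hypothetical template name); the call resolves to a boolean:
 *
 * @example
 * const exists = await client.cluster.existsComponentTemplate({ name: 'my-component-template' })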
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-component-template.html | Elasticsearch API documentation} */ - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise - async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest | TB.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.exists_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -184,19 +413,32 @@ export default class Cluster { * Get component templates. Get information about component templates. 
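 *
 * Illustrative sketch (not part of the diff; `client` and the template name are assumptions):
 *
 * @example
 * const template = await client.cluster.getComponentTemplate({ name: 'my-component-template' })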
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-component-template.html | Elasticsearch API documentation} */ - async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise - async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest | TB.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -224,19 +466,32 @@ export default class Cluster { * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. 
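 *
 * A minimal sketch (assumes a configured `client`). `include_defaults` and `flat_settings` are query parameters:
 *
 * @example
 * const settings = await client.cluster.getSettings({ include_defaults: true, flat_settings: true })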
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.ClusterGetSettingsRequest | TB.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -254,19 +509,32 @@ export default class Cluster { * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. 
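 *
 * Illustrative call (editor's sketch; assumes a configured `client`). Waiting for a yellow status blocks until the primary shards are allocated or the timeout elapses:
 *
 * @example
 * const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })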
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-health.html | Elasticsearch API documentation} */ - async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> - async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise - async health (this: That, params?: T.ClusterHealthRequest | TB.ClusterHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.health'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -294,18 +562,31 @@ export default class Cluster { * Get cluster info. Returns basic information about the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-info.html | Elasticsearch API documentation} */ - async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise - async info (this: That, params: T.ClusterInfoRequest | TB.ClusterInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['target'] - const querystring: Record = {} - const body = undefined + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -326,19 +607,32 @@ export default class Cluster { * Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task management API and the pending cluster tasks API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-pending.html | Elasticsearch API documentation} */ - async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> - async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise - async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest | TB.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.pending_tasks'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -356,19 +650,32 @@ export default class Cluster { * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks.
If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. 
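To make the add-then-clear lifecycle described above concrete, here is a hedged sketch (the node name is a placeholder, and `client` is assumed to be the configured Client instance from the earlier sketch):

```ts
// Exclude a departing node from the voting configuration; the call returns
// once the cluster has reconfigured its voting configuration to exclude it.
await client.cluster.postVotingConfigExclusions({ node_names: 'node-to-remove' })

// ...shut the excluded node down, then clear the exclusions list. Passing
// wait_for_removal: false clears it without waiting for the node to leave.
await client.cluster.deleteVotingConfigExclusions({ wait_for_removal: false })
```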
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} */ - async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise - async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest | TB.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.post_voting_config_exclusions'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -386,20 +693,27 @@ export default class Cluster { * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. 
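As an illustration of the `composed_of` workflow just described, a hedged sketch (the template names and settings are invented; `client` is assumed configured):

```ts
// Create a reusable component template holding shared index settings.
await client.cluster.putComponentTemplate({
  name: 'shared-settings',
  template: {
    settings: { number_of_shards: 1 }
  },
  version: 1,
  _meta: { description: 'shared shard settings' } // optional bookkeeping metadata
})

// An index template can then reference the component template via composed_of.
await client.indices.putIndexTemplate({
  name: 'logs-template',
  index_patterns: ['logs-*'],
  composed_of: ['shared-settings']
})
```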
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-component-template.html | Elasticsearch API documentation} */ - async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise - async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest | TB.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -409,9 +723,15 @@ export default class Cluster { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -430,20 +750,27 @@ export default class Cluster { * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. 
However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-update-settings.html | Elasticsearch API documentation} */ - async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise - async putSettings (this: That, params?: T.ClusterPutSettingsRequest | TB.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['persistent', 'transient'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -454,9 +781,15 @@ export default class Cluster { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -472,19 +805,32 @@ export default class Cluster { * Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. 
> info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the `/_resolve/cluster` endpoint. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-remote-info.html | Elasticsearch API documentation} */ - async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> - async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise - async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.remote_info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -502,20 +848,27 @@ export default class Cluster { * Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. 
The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-reroute.html | Elasticsearch API documentation} */ - async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise - async reroute (this: That, params?: T.ClusterRerouteRequest | TB.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['commands'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.reroute'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -526,9 +879,15 @@ export default class Cluster { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -544,19 +903,32 @@ export default class Cluster { * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. 
This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-state.html | Elasticsearch API documentation} */ - async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise - async state (this: That, params?: T.ClusterStateRequest | TB.ClusterStateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] - const querystring: Record = {} - const body = undefined + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.state'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -588,19 +960,32 @@ export default class Cluster { * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). 
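A quick usage sketch of the metrics listed above (illustrative only; it assumes the configured `client` and that the response fields follow the current typings):

```ts
const stats = await client.cluster.stats()
console.log(stats.indices.shards.total)        // shard numbers
console.log(stats.indices.store.size_in_bytes) // store size
console.log(stats.nodes.count.total)           // number of nodes
console.log(stats.nodes.jvm.versions)          // JVM versions present in the cluster
```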
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.ClusterStatsRequest | TB.ClusterStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['cluster.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 9472b218b..ba0791f23 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,369 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Connector { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'connector.check_in': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.delete': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'delete_sync_jobs' + ] + }, + 'connector.get': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.last_sync': { + path: [ + 'connector_id' + ], + body: [ + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ], + query: [] + }, + 'connector.list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'query' + ] + }, + 'connector.post': { + path: [], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.put': { + path: [ + 'connector_id' + ], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.secret_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_get': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_post': { + path: [], + body: [], + query: [] + }, + 'connector.secret_put': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.sync_job_cancel': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_check_in': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_claim': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'sync_cursor', + 'worker_hostname' + ], + query: [] + }, + 'connector.sync_job_delete': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_error': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.sync_job_get': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + }, + 'connector.sync_job_post': { + path: [], + body: [ + 'id', + 'job_type', + 'trigger_method' + ], + query: [] + }, + 'connector.sync_job_update_stats': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ], + query: [] + }, + 'connector.update_active_filtering': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.update_api_key_id': { + path: [ + 'connector_id' + ], + body: [ + 'api_key_id', + 'api_key_secret_id' + ], + query: [] + }, + 'connector.update_configuration': { + 
path: [ + 'connector_id' + ], + body: [ + 'configuration', + 'values' + ], + query: [] + }, + 'connector.update_error': { + path: [ + 'connector_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.update_features': { + path: [ + 'connector_id' + ], + body: [ + 'features' + ], + query: [] + }, + 'connector.update_filtering': { + path: [ + 'connector_id' + ], + body: [ + 'filtering', + 'rules', + 'advanced_snippet' + ], + query: [] + }, + 'connector.update_filtering_validation': { + path: [ + 'connector_id' + ], + body: [ + 'validation' + ], + query: [] + }, + 'connector.update_index_name': { + path: [ + 'connector_id' + ], + body: [ + 'index_name' + ], + query: [] + }, + 'connector.update_name': { + path: [ + 'connector_id' + ], + body: [ + 'name', + 'description' + ], + query: [] + }, + 'connector.update_native': { + path: [ + 'connector_id' + ], + body: [ + 'is_native' + ], + query: [] + }, + 'connector.update_pipeline': { + path: [ + 'connector_id' + ], + body: [ + 'pipeline' + ], + query: [] + }, + 'connector.update_scheduling': { + path: [ + 'connector_id' + ], + body: [ + 'scheduling' + ], + query: [] + }, + 'connector.update_service_type': { + path: [ + 'connector_id' + ], + body: [ + 'service_type' + ], + query: [] + }, + 'connector.update_status': { + path: [ + 'connector_id' + ], + body: [ + 'status' + ], + query: [] + } + } } /** * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/check-in-connector-api.html | Elasticsearch API documentation} */ - async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> - async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise - async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const querystring: Record = {} - const body = undefined + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.check_in'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +404,31 @@ export default class Connector { * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. 
NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-connector-api.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,18 +449,31 @@ export default class Connector { * Get a connector. Get the details about a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-connector-api.html | Elasticsearch API documentation} */ - async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const querystring: Record = {} - const body = undefined + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -144,20 +494,27 @@ export default class Connector { * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-last-sync-api.html | Elasticsearch API documentation} */ - async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> - async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise - async lastSync (this: That, params: T.ConnectorLastSyncRequest | TB.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.last_sync'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -167,9 +524,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -188,19 +551,32 @@ export default class Connector { * Get all connectors. Get information about all connectors. 
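A hedged paging sketch for this endpoint, using the query-only parameters registered in the `connector.list` entry above (the service type value is a placeholder; `client` is assumed configured):

```ts
const { count, results } = await client.connector.list({
  from: 0,
  size: 10,
  service_type: 'sharepoint_online' // filter by connector service type
})
console.log(count, results.map(connector => connector.name))
```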
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/list-connector-api.html | Elasticsearch API documentation} */ - async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise - async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -218,20 +594,27 @@ export default class Connector { * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/create-connector-api.html | Elasticsearch API documentation} */ - async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> - async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise - async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -242,9 +625,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -260,20 +649,27 @@ export default class Connector { * Create or update a connector. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/create-connector-api.html | Elasticsearch API documentation} */ - async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise - async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? 
{} @@ -284,9 +680,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -311,19 +713,32 @@ export default class Connector { /** * Deletes a connector secret. */ - async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async secretDelete (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -342,19 +757,32 @@ export default class Connector { /** * Retrieves a secret stored by Connectors. */ - async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async secretGet (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -373,19 +801,32 @@ export default class Connector { /** * Creates a secret for a Connector. */ - async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async secretPost (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -401,19 +842,32 @@ export default class Connector { /** * Creates or updates a secret for a Connector. */ - async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async secretPut (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -433,18 +887,31 @@ export default class Connector { * Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cancel-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise - async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const querystring: Record = {} - const body = undefined + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_cancel'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -465,18 +932,31 @@ export default class Connector { * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
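A minimal sketch of the heartbeat just described (the sync job ID is a placeholder; `client` is assumed configured):

```ts
// Record that the connector service is still alive by bumping last_seen.
await client.connector.syncJobCheckIn({ connector_sync_job_id: 'my-sync-job-id' })
```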
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/check-in-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise - async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest | TB.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const querystring: Record = {} - const body = undefined + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_check_in'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -497,20 +977,27 @@ export default class Connector { * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
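`syncJobCheckIn` follows the same shape; a minimal sketch under the same assumptions (local node, hypothetical ID):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Updates `last_seen` on the sync job; normally called by a
// self-managed connector service rather than by end users.
await client.connector.syncJobCheckIn({
  connector_sync_job_id: 'my-sync-job-id' // hypothetical ID
})
```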
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/claim-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise - async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest | TB.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_claim'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -520,9 +1007,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -541,18 +1034,31 @@ export default class Connector { * Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. 
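`syncJobClaim` is the first method in this file to exercise the new body/query routing: `worker_hostname` and `sync_cursor` appear in its accepted body list, so the loop above sends them in the request body rather than the querystring. A hedged sketch (the cursor shape is connector-specific and hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.syncJobClaim({
  connector_sync_job_id: 'my-sync-job-id', // path parameter
  worker_hostname: 'connector-worker-1',   // accepted body parameter
  sync_cursor: { last_doc_id: '42' }       // hypothetical, connector-specific cursor
})
```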
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise - async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const querystring: Record = {} - const body = undefined + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -573,20 +1079,27 @@ export default class Connector { * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/set-connector-sync-job-error-api.html | Elasticsearch API documentation} */ - async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise - async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest | TB.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['error'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_error'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -596,9 +1109,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -617,18 +1136,31 @@ export default class Connector { * Get a connector sync job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise - async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const querystring: Record = {} - const body = undefined + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -649,19 +1181,32 @@ export default class Connector { * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/list-connector-sync-jobs-api.html | Elasticsearch API documentation} */ - async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise - async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -679,20 +1224,27 @@ export default class Connector { * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. 
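The `querystring` key handled above is a raw pass-through: its entries are copied straight into the URL and skip the accepted-parameter routing. A sketch using `syncJobList`; the `size` paging key is an assumption for illustration, not taken from this diff:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

const jobs = await client.connector.syncJobList({
  querystring: { size: 20 } // assumed server-side paging parameter
})
console.log(jobs.count, jobs.results)
```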
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/create-connector-sync-job-api.html | Elasticsearch API documentation} */ - async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise - async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -702,9 +1254,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -720,20 +1278,27 @@ export default class Connector { * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
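A sketch of `syncJobPost`, whose accepted body list is `id`, `job_type`, and `trigger_method` (the enum values below match the API documentation but are assumptions relative to this diff):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

const job = await client.connector.syncJobPost({
  id: 'my-connector-id',      // the connector to sync
  job_type: 'full',           // assumed value, per the API docs
  trigger_method: 'on_demand' // assumed value, per the API docs
})
console.log(job.id) // ID of the new sync job document
```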
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/set-connector-sync-job-stats-api.html | Elasticsearch API documentation} */ - async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise - async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest | TB.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_update_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -743,9 +1308,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -764,18 +1335,31 @@ export default class Connector { * Activate the connector draft filter. Activates the valid draft filtering for a connector. 
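`syncJobUpdateStats` accepts the counters named in its body list; a minimal sketch with hypothetical values:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.syncJobUpdateStats({
  connector_sync_job_id: 'my-sync-job-id', // hypothetical ID
  deleted_document_count: 0,
  indexed_document_count: 42,
  indexed_document_volume: 1024, // unit as defined by the API docs
  total_document_count: 42
})
```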
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-filtering-api.html | Elasticsearch API documentation} */ - async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise - async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const querystring: Record = {} - const body = undefined + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['connector.update_active_filtering'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -796,20 +1380,27 @@ export default class Connector { * Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-api-key-id-api.html | Elasticsearch API documentation} */ - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise - async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_api_key_id'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -819,9 +1410,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -840,20 +1437,27 @@ export default class Connector { * Update the connector configuration. Update the configuration field in the connector document. 
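A sketch of `updateApiKeyId`; both fields come from its accepted body list, and the secret ID is only relevant for Elastic managed connectors, as the description above notes:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateApiKeyId({
  connector_id: 'my-connector-id',
  api_key_id: 'XgrT1JgBflL73lNO2KXM',      // hypothetical API key ID
  api_key_secret_id: 'my-connector-secret' // hypothetical secret ID
})
```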
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-configuration-api.html | Elasticsearch API documentation} */ - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise - async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['configuration', 'values'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -863,9 +1467,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -884,20 +1494,27 @@ export default class Connector { * Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. 
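`updateConfiguration` accepts `configuration` and `values` in the body; `values` is the flat key/value form, with keys that depend on the connector's service type. A hedged sketch with hypothetical settings:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateConfiguration({
  connector_id: 'my-connector-id',
  values: { host: 'db.example.com', port: 5432 } // hypothetical connector settings
})
```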
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-error-api.html | Elasticsearch API documentation} */ - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise - async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['error'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_error'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -907,9 +1524,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -928,20 +1551,27 @@ export default class Connector { * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
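A sketch of `updateError`, reflecting the status behavior described above (a non-null `error` moves the connector to `error`; null resets it to `connected`):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateError({
  connector_id: 'my-connector-id',
  error: 'Authentication against the source system failed' // hypothetical message
})
```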
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-features-api.html | Elasticsearch API documentation} */ - async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise - async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest | TB.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['features'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -951,9 +1581,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -972,20 +1608,27 @@ export default class Connector { * Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. 
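`updateFeatures` takes a single `features` body object. The nested shape below is an assumption based on the feature names in the description (document-level security, incremental syncs), not something this diff specifies:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateFeatures({
  connector_id: 'my-connector-id',
  features: {
    document_level_security: { enabled: true }, // assumed shape
    incremental_sync: { enabled: true }         // assumed shape
  }
})
```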
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-filtering-api.html | Elasticsearch API documentation} */ - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise - async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -995,9 +1638,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1016,20 +1665,27 @@ export default class Connector { * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. 
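`updateFiltering` accepts `filtering`, `rules`, and `advanced_snippet` in the body. The basic sync rule below is a hypothetical illustration; the exact rule schema is defined by the API, not by this diff:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateFiltering({
  connector_id: 'my-connector-id',
  rules: [{
    id: 'exclude-drafts', // hypothetical rule
    field: 'status',
    policy: 'exclude',
    rule: 'equals',
    value: 'draft',
    order: 0
  }]
})
```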
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-filtering-validation-api.html | Elasticsearch API documentation} */ - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise - async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['validation'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering_validation'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1039,9 +1695,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1060,20 +1722,27 @@ export default class Connector { * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-index-name-api.html | Elasticsearch API documentation} */ - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise - async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['index_name'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_index_name'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1083,9 +1752,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1104,20 +1779,27 @@ export default class Connector { * Update the connector name and description. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-name-description-api.html | Elasticsearch API documentation} */ - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise - async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['name', 'description'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_name'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1127,9 +1809,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1148,20 +1836,27 @@ export default class Connector { * Update the connector is_native flag. 
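A sketch of `updateName`, whose accepted body list is `name` and `description`:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateName({
  connector_id: 'my-connector-id',
  name: 'Production content connector',                // hypothetical
  description: 'Syncs the product catalog into search' // hypothetical
})
```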
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-native-api.html | Elasticsearch API documentation} */ - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise - async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['is_native'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_native'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1171,9 +1866,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1192,20 +1893,27 @@ export default class Connector { * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-pipeline-api.html | Elasticsearch API documentation} */ - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise - async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['pipeline'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1215,9 +1923,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1236,20 +1950,27 @@ export default class Connector { * Update the connector scheduling. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-scheduling-api.html | Elasticsearch API documentation} */ - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise - async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['scheduling'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_scheduling'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1259,9 +1980,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1280,20 +2007,27 @@ export default class Connector { * Update the connector service type. 
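`updateScheduling` takes a single `scheduling` body object; the cron-style interval below follows the documented scheduling shape but is a hypothetical value:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

await client.connector.updateScheduling({
  connector_id: 'my-connector-id',
  scheduling: {
    full: { enabled: true, interval: '0 0 0 * * ?' } // hypothetical cron expression
  }
})
```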
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-service-type-api.html | Elasticsearch API documentation} */ - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise - async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['service_type'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_service_type'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1303,9 +2037,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1324,20 +2064,27 @@ export default class Connector { * Update the connector status. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-connector-status-api.html | Elasticsearch API documentation} */ - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise - async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['status'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1347,9 +2094,15 @@ export default class Connector { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 92b2b211e..87b009b6e 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
+ * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,65 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + count: { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'min_score', + 'preference', + 'routing', + 'terminate_after', + 'q' + ] + } +} /** * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-count.html | Elasticsearch API documentation} */ -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise -export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.count + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? 
{} @@ -66,9 +90,15 @@ export default async function CountApi (this: That, params?: T.CountRequest | TB body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/create.ts b/src/api/api/create.ts index dffc7ef76..cad032697 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,70 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + create: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'include_source_on_error', + 'pipeline', + 'refresh', + 'require_alias', + 'require_data_stream', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} /** * Create a new document in the index. You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behavior is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement, or routing, is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-index_.html | Elasticsearch API documentation} */ -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise -export default async function CreateApi (this: That, params: T.CreateRequest | TB.CreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.create + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index 94e246ffd..31508d8e3 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,77 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class DanglingIndices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'dangling_indices.delete_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.import_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.list_dangling_indices': { + path: [], + body: [], + query: [] + } + } } /** * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/dangling-index-delete.html | Elasticsearch API documentation} */ - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise - async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest | TB.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index_uuid'] - const querystring: Record = {} - const body = undefined + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.delete_dangling_index'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +112,31 @@ export default class DanglingIndices { * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/dangling-index-import.html | Elasticsearch API documentation} */ - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise - async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest | TB.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index_uuid'] - const querystring: Record = {} - const body = undefined + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.import_dangling_index'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,19 +157,32 @@ export default class DanglingIndices { * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. 
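Together, the three methods above support a list-then-resolve workflow. A minimal sketch, assuming a locally reachable cluster; the response destructuring follows the list API's documented shape, and the choice to import rather than delete is illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List dangling indices, then resolve each one by UUID.
const { dangling_indices: dangling } = await client.danglingIndices.listDanglingIndices()
for (const index of dangling) {
  // accept_data_loss is required: dangling data may be stale or incomplete.
  await client.danglingIndices.importDanglingIndex({
    index_uuid: index.index_uuid,
    accept_data_loss: true
  })
  // Alternatively, discard it instead:
  // await client.danglingIndices.deleteDanglingIndex({ index_uuid: index.index_uuid, accept_data_loss: true })
}
```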
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/dangling-indices-list.html | Elasticsearch API documentation} */ - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise - async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest | TB.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.list_dangling_indices'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 238e06a51..70c7dfa67 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,60 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'if_primary_term', + 'if_seq_no', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} /** * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. 
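The optimistic concurrency control and routing rules described above map directly onto the parameters listed in `acceptedParams.delete`. A minimal sketch, assuming a configured client and an index where the `_routing` mapping is required:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Read the document first to capture its current sequence number and primary term.
const doc = await client.get({ index: 'my-index-000001', id: '1', routing: 'shard-1' })

// Conditional delete: if another write bumped _seq_no in the meantime,
// this fails with a VersionConflictException and a 409 status code.
await client.delete({
  index: 'my-index-000001',
  id: '1',
  routing: 'shard-1',
  if_seq_no: doc._seq_no,
  if_primary_term: doc._primary_term
})
```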
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-delete.html | Elasticsearch API documentation} */ -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise -export default async function DeleteApi (this: That, params: T.DeleteRequest | TB.DeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.delete + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index 7a720ab57..923bef823 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,83 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + delete_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'slice', + 'sort' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'preference', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'q', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. 
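The options discussed above (conflict counting, throttling, and slicing) translate into a single call. A minimal sketch, assuming a configured client; index and query values are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const response = await client.deleteByQuery({
  index: 'my-index-000001',
  query: { match: { 'user.id': 'elkbee' } }, // body parameter
  conflicts: 'proceed',       // count version conflicts instead of aborting
  slices: 'auto',             // let Elasticsearch pick one slice per shard
  requests_per_second: 500,   // throttle each batch with a computed wait time
  wait_for_completion: false  // return a task instead of blocking
})
// The task ID works with the task APIs and the rethrottle endpoint covered next.
console.log(response.task)
```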
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-delete-by-query.html | Elasticsearch API documentation} */ -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise -export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest | TB.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_docs', 'query', 'slice'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.delete_by_query + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +107,15 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 49712acc0..8cf94fdf0 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-delete-by-query.html#docs-delete-by-query-rethrottle | Elasticsearch API documentation} */ -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise -export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest | TB.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.delete_by_query_rethrottle + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index fbb8c6899..cbb73d5ea 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } +} /** * Delete a script or search template. Deletes a stored script or search template. 
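A minimal usage sketch, assuming a configured client and a previously stored script or template with this ID:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Both timeouts bound how long to wait for the cluster-state update:
// master_timeout for reaching the master node, timeout for the response.
await client.deleteScript({
  id: 'my-search-template',
  master_timeout: '30s',
  timeout: '30s'
})
```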
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-stored-script-api.html | Elasticsearch API documentation} */ -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise -export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.delete_script + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index 4ed06fb3d..396b0545a 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,100 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Enrich { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'enrich.delete_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.execute_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'enrich.get_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.put_policy': { + path: [ + 'name' + ], + body: [ + 'geo_match', + 'match', + 'range' + ], + query: [ + 'master_timeout' + ] + }, + 'enrich.stats': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Delete an enrich policy. Deletes an existing enrich policy and its enrich index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-enrich-policy-api.html | Elasticsearch API documentation} */ - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise - async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest | TB.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['enrich.delete_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +135,31 @@ export default class Enrich { * Run an enrich policy. Create the enrich index for an existing enrich policy. 
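A minimal sketch of running an existing policy, assuming a configured client; the policy name is illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Build the policy's enrich index. Execution can take a while for large
// source indices; set wait_for_completion: false to get a task back instead.
await client.enrich.executePolicy({
  name: 'users-policy',
  wait_for_completion: true
})
```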
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/execute-enrich-policy-api.html | Elasticsearch API documentation} */ - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise - async executePolicy (this: That, params: T.EnrichExecutePolicyRequest | TB.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['enrich.execute_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,19 +180,32 @@ export default class Enrich { * Get an enrich policy. Returns information about an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-enrich-policy-api.html | Elasticsearch API documentation} */ - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise - async getPolicy (this: That, params?: T.EnrichGetPolicyRequest | TB.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['enrich.get_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -152,20 +233,27 @@ export default class Enrich { * Create an enrich policy. Creates an enrich policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-enrich-policy-api.html | Elasticsearch API documentation} */ - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise - async putPolicy (this: That, params: T.EnrichPutPolicyRequest | TB.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['geo_match', 'match', 'range'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['enrich.put_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -175,9 +263,15 @@ export default class Enrich { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -196,19 +290,32 @@ export default class Enrich { * Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. 
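 * @example
 * // Minimal sketch, assuming a configured `client`. The stats API takes no
 * // required parameters; `master_timeout` (a query param per the list above)
 * // is optional.
 * const stats = await client.enrich.stats({ master_timeout: '30s' })
 * console.log(stats)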
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/enrich-stats-api.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.EnrichStatsRequest | TB.EnrichStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['enrich.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 44dfe4ac7..ba9dd50a8 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,111 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Eql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'eql.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'eql.get_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.search': { + path: [ + 'index' + ], + body: [ + 'query', + 'case_sensitive', + 'event_category_field', + 'tiebreaker_field', + 'timestamp_field', + 'fetch_size', + 'filter', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'size', + 'fields', + 'result_position', + 'runtime_mappings', + 'max_samples_per_key' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'expand_wildcards', + 'ccs_minimize_roundtrips', + 'ignore_unavailable', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout' + ] + } + } } /** * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete | Elasticsearch API documentation} */ - async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['eql.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +146,31 @@ export default class Eql { * Get async EQL search results. 
Get the current status and available results for an async EQL search or a stored synchronous EQL search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-async-eql-search-api.html | Elasticsearch API documentation} */ - async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise> - async get (this: That, params: T.EqlGetRequest | TB.EqlGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['eql.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,18 +191,31 @@ export default class Eql { * Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-async-eql-status-api.html | Elasticsearch API documentation} */ - async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise - async getStatus (this: That, params: T.EqlGetStatusRequest | TB.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['eql.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -144,20 +236,27 @@ export default class Eql { * Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/eql-search-api.html | Elasticsearch API documentation} */ - async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise> - async search (this: That, params: T.EqlSearchRequest | TB.EqlSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['eql.search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -167,9 +266,15 @@ export default class Eql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index 67cadd67c..273a41464 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,117 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Esql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'esql.async_query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion' + ], + query: [ + 'allow_partial_results', + 'delimiter', + 'drop_null_columns', + 'format' + ] + }, + 'esql.async_query_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.async_query_get': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_stop': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns' + ] + }, + 'esql.query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata' + ], + query: [ + 'format', + 'delimiter', + 'drop_null_columns', + 'allow_partial_results' + ] + } + } } /** * Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. 
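 * @example
 * // Sketch under assumptions: `client` is a configured Client and 'my-index'
 * // is a hypothetical index. `query` is a body param; the timeout and keep
 * // flags match the accepted params listed above.
 * const submitted = await client.esql.asyncQuery({
 *   query: 'FROM my-index | LIMIT 10',
 *   wait_for_completion_timeout: '2s',
 *   keep_on_completion: true
 * })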
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/esql-async-query-api.html | Elasticsearch API documentation} */ - async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise - async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest | TB.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata', 'wait_for_completion_timeout', 'keep_alive', 'keep_on_completion'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.async_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -71,9 +141,15 @@ export default class Esql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,18 +165,31 @@ export default class Esql { * Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. 
If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/esql-async-query-delete-api.html | Elasticsearch API documentation} */ - async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest | TB.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest | TB.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest | TB.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise - async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest | TB.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -121,18 +210,31 @@ export default class Esql { * Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. 
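 * @example
 * // Sketch: poll a previously submitted async query. The `id` is a
 * // hypothetical placeholder and would come from the asyncQuery response;
 * // assumes a configured `client`.
 * const result = await client.esql.asyncQueryGet({
 *   id: 'my-async-query-id',
 *   wait_for_completion_timeout: '30s'
 * })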
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/esql-async-query-get-api.html | Elasticsearch API documentation} */ - async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest | TB.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest | TB.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest | TB.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise - async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest | TB.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -153,18 +255,31 @@ export default class Esql { * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. 
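 * @example
 * // Sketch: interrupt a running async ES|QL query and return the results so
 * // far. Assumes a configured `client`; the id is a hypothetical placeholder.
 * const partial = await client.esql.asyncQueryStop({ id: 'my-async-query-id' })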
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/esql-async-query-stop-api.html | Elasticsearch API documentation} */ - async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> - async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise - async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_stop'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -185,20 +300,27 @@ export default class Esql { * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/esql-rest.html | Elasticsearch API documentation} */ - async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise - async query (this: That, params: T.EsqlQueryRequest | TB.EsqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -208,9 +330,15 @@ export default class Esql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index f00700618..ce0ed7357 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,62 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. 
The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-get.html | Elasticsearch API documentation} */ -export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise -export default async function ExistsApi (this: That, params: T.ExistsRequest | TB.ExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.exists + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 849f67315..7808f4336 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,61 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } +} /** * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-get.html | Elasticsearch API documentation} */ -export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise -export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest | TB.ExistsSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.exists_source + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 3fa4a9a60..b228908eb 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,64 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + explain: { + path: [ + 'id', + 'index' + ], + body: [ + 'query' + ], + query: [ + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] + } +} /** * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-explain.html | Elasticsearch API documentation} */ -export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise> -export default async function ExplainApi (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['query'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.explain + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +88,15 @@ export default async function ExplainApi (this: That, param body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 82ed82cb3..1ae8f8191 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,65 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Features { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'features.get_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'features.reset_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. 
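 * @example
 * // Sketch, assuming a configured `client` and that the response exposes a
 * // `features` array (per the GetFeaturesResponse type).
 * const resp = await client.features.getFeatures()
 * for (const feature of resp.features) console.log(feature.name)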
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-features-api.html | Elasticsearch API documentation} */ - async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise - async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest | TB.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['features.get_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -78,19 +97,32 @@ export default class Features { * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. 
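 * @example
 * // Sketch, assuming a configured `client`. As the warning above says, this
 * // is intended for development and testing only; never run it against a
 * // production cluster.
 * const resp = await client.features.resetFeatures()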
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/reset-features-api.html | Elasticsearch API documentation} */ - async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise - async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['features.reset_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index a45d05dd3..301f56556 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,61 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + field_caps: { + path: [ + 'index' + ], + body: [ + 'fields', + 'index_filter', + 'runtime_mappings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'fields', + 'ignore_unavailable', + 'include_unmapped', + 'filters', + 'types', + 'include_empty_fields' + ] + } +} /** * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-field-caps.html | Elasticsearch API documentation} */ -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise -export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest | TB.FieldCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.field_caps + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? 
{} @@ -66,9 +86,15 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 30fcdd1a6..845b38a9c 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,191 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Fleet { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'fleet.delete_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.get_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.global_checkpoints': { + path: [ + 'index' + ], + body: [], + query: [ + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + }, + 'fleet.msearch': { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + }, + 'fleet.post_secret': { + path: [], + body: [], + query: [] + }, + 'fleet.search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 
'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'min_compatible_shard_node', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + } } /** * Deletes a secret stored by Fleet. */ - async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async deleteSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['fleet.delete_secret'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -78,19 +224,32 @@ export default class Fleet { /** * Retrieves a secret stored by Fleet. 
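A note on the pattern these generated methods now share: every key in `params` is routed by the per-operation `acceptedParams` entry. Keys listed under `path` become part of the URL, keys listed under `body` are serialized into the request body, keys listed under `query` (or in `commonQueryParams`) go to the querystring, and any remaining key now falls back into the body rather than the querystring. A minimal sketch of that rule, using a hypothetical `routeParam` helper that does not exist in the diff:
```
type Accepted = { path: string[], body: string[], query: string[] }

// Hypothetical helper (illustration only): classify one request key
// under the new acceptedParams scheme.
function routeParam (key: string, accepted: Accepted, common: string[]): 'path' | 'body' | 'query' {
  if (accepted.path.includes(key)) return 'path' // interpolated into the URL
  if (accepted.body.includes(key)) return 'body' // serialized into the request body
  if (accepted.query.includes(key) || common.includes(key)) return 'query'
  return 'body' // unknown keys now default to the body
}
```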
*/ - async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['fleet.get_secret'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -110,18 +269,31 @@ export default class Fleet { * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-global-checkpoints.html | Elasticsearch API documentation} */ - async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise - async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['fleet.global_checkpoints'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -142,25 +314,35 @@ export default class Fleet { * Executes several fleet searches with a single API request. The API follows the same structure as the multi search (`_msearch`) API. However, similar to the fleet search API, it supports the `wait_for_checkpoints` parameter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/fleet-multi-search.html | Elasticsearch API documentation} */ - async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise> - async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.msearch'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -185,19 +367,32 @@ export default class Fleet { /** * Creates a secret stored by Fleet. 
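For orientation, a call against the `fleet.msearch` surface shown above might look like the following sketch, run inside an async function; the client instance, index name, and checkpoint value are assumptions, not part of the diff:
```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// searches takes msearch-style header/body pairs; wait_for_checkpoints
// defers execution until the given checkpoint is visible to search.
const resp = await client.fleet.msearch({
  index: 'logs-fleet-default',
  wait_for_checkpoints: [42],
  searches: [
    {},
    { query: { match_all: {} } }
  ]
})
```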
*/ - async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> - async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> - async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO> - async postSecret (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = [] - const querystring: Record<string, any> = {} - const body = undefined + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO> + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>> + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO> + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['fleet.post_secret'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -214,20 +409,27 @@ export default class Fleet { * The purpose of the Fleet search API is to provide an API where the search is run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/fleet-search.html | Elasticsearch API documentation} */ - async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.FleetSearchResponse<TDocument>> - async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.FleetSearchResponse<TDocument>, unknown>> - async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise<T.FleetSearchResponse<TDocument>> - async search<TDocument = unknown> (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record<string, any> = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record<string, any> | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ?
{ ...userBody } : undefined + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -237,9 +439,15 @@ export default class Fleet { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 68e14e118..90235c44a 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,63 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. 
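To make the realtime behavior above concrete, a call through this API might look like the following sketch (the client instance, index name, and document ID are assumed, not part of the diff):
```
// TDocument types the `_source` of the fetched document.
const result = await client.get<{ title: string }>({
  index: 'my-index-000001',
  id: '0',
  realtime: false // opt out of realtime GET behavior
})
if (result.found) console.log(result._source?.title)
```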
**Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-get.html | Elasticsearch API documentation} */ -export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetResponse<TDocument>> -export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetResponse<TDocument>, unknown>> -export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>> -export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest | TB.GetRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['id', 'index'] - const querystring: Record<string, any> = {} - const body = undefined +export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.GetResponse<TDocument>> +export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.GetResponse<TDocument>, unknown>> +export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise<T.GetResponse<TDocument>> +export default async function GetApi<TDocument = unknown> (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = acceptedParams.get + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index 46a1acd72..d91c78ab6 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + } +} /** * Get a script or search template. Retrieves a stored script or search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-stored-script-api.html | Elasticsearch API documentation} */ -export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise -export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 04c1dd3f9..21794aa2c 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_context: { + path: [], + body: [], + query: [] + } +} /** * Get script contexts. Get a list of supported script contexts and their methods. 
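Neither of these script endpoints accepts path, body, or query parameters beyond the common ones, so a call is a single no-argument invocation; a sketch, assuming a connected `client`:
```
const ctx = await client.getScriptContext()
// Each context lists the methods scripts may call in that context.
for (const c of ctx.contexts) {
  console.log(c.name, c.methods.map(m => m.name))
}
```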
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-script-contexts-api.html | Elasticsearch API documentation} */ -export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise -export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script_context + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 32c1669b5..7ce9663ae 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_languages: { + path: [], + body: [], + query: [] + } +} /** * Get script languages. Get a list of available script types, languages, and contexts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-script-languages-api.html | Elasticsearch API documentation} */ -export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise -export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script_languages + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index 41184f134..11a01aadf 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,61 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } +} /** * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-get.html | Elasticsearch API documentation} */ -export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise> -export default async function GetSourceApi (this: That, params: T.GetSourceRequest | TB.GetSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const querystring: Record = {} - const body = undefined +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_source + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index fc1d0bbcd..0e9556da8 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,63 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Graph { transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'graph.explore': { + path: [ + 'index' + ], + body: [ + 'connections', + 'controls', + 'query', + 'vertices' + ], + query: [ + 'routing', + 'timeout' + ] + } + } } /** * Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one or more vertices of interest. You can exclude vertices that have already been returned.
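A seed request shaped by the `graph.explore` accepted params above might look like this sketch (the index, field, and query values are illustrative, not part of the diff):
```
const exploration = await client.graph.explore({
  index: 'clicklogs',
  query: { match: { 'query.raw': 'midi' } }, // seed query
  vertices: [{ field: 'product' }], // fields that define graph vertices
  controls: { use_significance: true, sample_size: 2000 }
})
console.log(exploration.vertices.length, exploration.connections.length)
```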
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/graph-explore-api.html | Elasticsearch API documentation} */ - async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> - async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise - async explore (this: That, params: T.GraphExploreRequest | TB.GraphExploreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['graph.explore'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -71,9 +87,15 @@ export default class Graph { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index a6ae8fb1b..d12e160d1 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,55 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + health_report: { + path: [ + 'feature' + ], + body: [], + query: [ + 'timeout', + 'verbose', + 'size' + ] + } +} /** * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. 
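For the automated-polling case described above, a cheap status check might look like this sketch (assumes a connected `client`):
```
// verbose: false skips the expensive root-cause analysis.
const health = await client.healthReport({ verbose: false })
if (health.status !== 'green') {
  for (const [name, indicator] of Object.entries(health.indicators)) {
    if (indicator != null && indicator.status !== 'green') {
      console.log(name, indicator.status, indicator.symptom)
    }
  }
}
```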
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/health-api.html | Elasticsearch API documentation} */ -export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise -export default async function HealthReportApi (this: That, params?: T.HealthReportRequest | TB.HealthReportRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['feature'] - const querystring: Record = {} - const body = undefined +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.health_report + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 60d797b9b..9c510e1fd 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,150 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ilm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ilm.delete_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.explain_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'only_errors', + 'only_managed', + 'master_timeout' + ] + }, + 'ilm.get_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.get_status': { + path: [], + body: [], + query: [] + }, + 'ilm.migrate_to_data_tiers': { + path: [], + body: [ + 'legacy_template_to_delete', + 'node_attribute' + ], + query: [ + 'dry_run' + ] + }, + 'ilm.move_to_step': { + path: [ + 'index' + ], + body: [ + 'current_step', + 'next_step' + ], + query: [] + }, + 'ilm.put_lifecycle': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.remove_policy': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.retry': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise - async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest | TB.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.delete_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +185,31 @@ export default class Ilm { * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise - async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest | TB.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.explain_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,19 +230,32 @@ export default class Ilm { * Get lifecycle policies. 
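Since `ilm.put_lifecycle` sends the policy in the body and `ilm.get_lifecycle` reads it back by name, a round trip looks roughly like this sketch (the policy name and phase definitions are illustrative):
```
await client.ilm.putLifecycle({
  name: 'logs-policy',
  policy: {
    phases: {
      hot: { actions: { rollover: { max_age: '7d' } } },
      delete: { min_age: '30d', actions: { delete: {} } }
    }
  }
})
const policies = await client.ilm.getLifecycle({ name: 'logs-policy' })
console.log(policies['logs-policy'].version) // starts at 1, increments on update
```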
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-get-lifecycle.html | Elasticsearch API documentation} */ - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise - async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest | TB.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -152,19 +283,32 @@ export default class Ilm { * Get the ILM status. Get the current index lifecycle management status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-get-status.html | Elasticsearch API documentation} */ - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise - async getStatus (this: That, params?: T.IlmGetStatusRequest | TB.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,20 +326,27 @@ export default class Ilm { * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation} */ - async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise - async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest | TB.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.migrate_to_data_tiers'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? 
{} @@ -206,9 +357,15 @@ export default class Ilm { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -224,20 +381,27 @@ export default class Ilm { * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and should be considered an expert-level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-move-to-step.html | Elasticsearch API documentation} */ - async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse> - async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>> - async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse> - async moveToStep (this: That, params: T.IlmMoveToStepRequest | TB.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['current_step', 'next_step'] - const querystring: Record<string, any> = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record<string, any> | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.move_to_step'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -247,9 +411,15 @@ export default class Ilm { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -268,20 +438,27 @@ export default class Ilm { * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-put-lifecycle.html | Elasticsearch API documentation} */ - async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise - async putLifecycle (this: That, params: T.IlmPutLifecycleRequest | TB.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.put_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -291,9 +468,15 @@ export default class Ilm { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -312,18 +495,31 @@ export default class Ilm { * Remove policies from an index. 
Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-remove-policy.html | Elasticsearch API documentation} */ - async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise - async removePolicy (this: That, params: T.IlmRemovePolicyRequest | TB.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.remove_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -344,18 +540,31 @@ export default class Ilm { * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. 
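A minimal usage sketch for the retry API, assuming the standard `@elastic/elasticsearch` client, a hypothetical local node URL, and a hypothetical index name (`my-index-000001`); `explainLifecycle` is used here only to confirm the index is actually in the ERROR step:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // hypothetical endpoint

// Confirm the index is in the ERROR step before retrying.
const explain = await client.ilm.explainLifecycle({ index: 'my-index-000001' })
console.log(explain.indices)

// Set the policy back to the step where the error occurred and run it again.
await client.ilm.retry({ index: 'my-index-000001' })
```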
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-retry-policy.html | Elasticsearch API documentation} */ - async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise - async retry (this: That, params: T.IlmRetryRequest | TB.IlmRetryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.retry'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -376,19 +585,32 @@ export default class Ilm { * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-start.html | Elasticsearch API documentation} */ - async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> - async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise - async start (this: That, params?: T.IlmStartRequest | TB.IlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.start'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -406,19 +628,32 @@ export default class Ilm { * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ilm-stop.html | Elasticsearch API documentation} */ - async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise - async stop (this: That, params?: T.IlmStopRequest | TB.IlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ilm.stop'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/index.ts b/src/api/api/index.ts index ca45264b9..8644ada46 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,73 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + index: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} /** * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. 
Specify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, the write operation must wait and retry until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say, `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of many factors, like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
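The external-versioning example above translates directly to the client's `index` method, which this change moves to the body-free signature (the document goes under the `document` key). A minimal sketch, assuming a hypothetical node URL; the index name and payload mirror the `PUT` example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // hypothetical endpoint

// Succeeds only while 2 is greater than the currently stored version;
// a lower or equal value fails with a 409 version conflict.
await client.index({
  index: 'my-index-000001',
  id: '1',
  version: 2,
  version_type: 'external',
  document: { user: { id: 'elkbee' } }
})
```

The `if_seq_no` and `if_primary_term` parameters from the optimistic-concurrency section are passed the same way, as top-level request properties.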
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-index_.html | Elasticsearch API documentation} */ -export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptions): Promise -export default async function IndexApi (this: That, params: T.IndexRequest | TB.IndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined +export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise +export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.index + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index b5dff052d..d1e728954 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,929 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Indices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'indices.add_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.analyze': { + path: [ + 'index' + ], + body: [ + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ], + query: [ + 'index' + ] + }, + 'indices.cancel_migrate_reindex': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + }, + 'indices.clone': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.close': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create': { + path: [ + 'index' + ], + body: [ + 'aliases', + 'mappings', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.create_from': { + path: [ + 'source', + 'dest' + ], + body: [ + 'create_from' + ], + query: [] + }, + 'indices.data_streams_stats': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards' + ] + }, + 'indices.delete': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_alias': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'expand_wildcards' + ] + }, + 'indices.delete_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'indices.delete_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.disk_usage': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + }, + 'indices.downsample': { + path: [ + 'index', + 'target_index' + ], + body: [ + 'config' + ], + query: [] + }, + 'indices.exists': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 
'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.exists_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local' + ] + }, + 'indices.exists_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout' + ] + }, + 'indices.exists_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.explain_data_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.field_usage_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] + }, + 'indices.flush': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + }, + 'indices.forcemerge': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + }, + 'indices.get': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + }, + 'indices.get_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local' + ] + }, + 'indices.get_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle_stats': { + path: [], + body: [], + query: [] + }, + 'indices.get_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + }, + 'indices.get_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'indices.get_data_stream_settings': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'indices.get_field_mapping': { + path: [ + 'fields', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.get_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.get_mapping': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + }, + 'indices.get_migrate_reindex_status': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_settings': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'indices.get_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.migrate_reindex': { + path: [], + body: [ + 'reindex' + ], + query: [] + }, + 'indices.migrate_to_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.modify_data_stream': { + path: [], + body: [ + 'actions' + ], + query: 
[] + }, + 'indices.open': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.promote_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.put_alias': { + path: [ + 'index', + 'name' + ], + body: [ + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_lifecycle': { + path: [ + 'name' + ], + body: [ + 'data_retention', + 'downsampling', + 'enabled' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'indices.put_data_stream_settings': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'indices.put_index_template': { + path: [ + 'name' + ], + body: [ + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'cause' + ] + }, + 'indices.put_mapping': { + path: [ + 'index' + ], + body: [ + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + }, + 'indices.put_settings': { + path: [ + 'index' + ], + body: [ + 'settings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] + }, + 'indices.put_template': { + path: [ + 'name' + ], + body: [ + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version' + ], + query: [ + 'create', + 'master_timeout', + 'order', + 'cause' + ] + }, + 'indices.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.refresh': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.reload_search_analyzers': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] + }, + 'indices.resolve_cluster': { + path: [ + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + }, + 'indices.resolve_index': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices' + ] + }, + 'indices.rollover': { + path: [ + 'alias', + 'new_index' + ], + body: [ + 'aliases', + 'conditions', + 'mappings', + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] + }, + 'indices.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'verbose' + ] + }, + 'indices.shard_stores': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + }, + 'indices.shrink': { + path: [ + 
'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.simulate_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.simulate_template': { + path: [ + 'name' + ], + body: [ + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.split': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.stats': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + }, + 'indices.unfreeze': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.update_aliases': { + path: [], + body: [ + 'actions' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.validate_query': { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + } } /** * Add an index block. Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/index-modules-blocks.html#add-index-block | Elasticsearch API documentation} */ - async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> - async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise - async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'block'] - const querystring: Record = {} - const body = undefined + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.add_block'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -81,20 +965,27 @@ export default class Indices { * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. Generating an excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens is generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-analyze | Elasticsearch API documentation} */ - async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse> - async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>> - async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse> - async analyze (this: That, params?: T.IndicesAnalyzeRequest | TB.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] - const querystring: Record<string, any> = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record<string, any> | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.analyze'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -105,9 +996,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -133,18 +1030,31 @@ export default class Indices { * Cancel a migration reindex operation.
Cancel a migration reindex attempt for a data stream or index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/migrate-data-stream.html | Elasticsearch API documentation} */ - async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest | TB.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest | TB.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest | TB.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise - async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest | TB.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.cancel_migrate_reindex'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -165,19 +1075,32 @@ export default class Indices { * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. 
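A minimal usage sketch for selectively clearing caches, assuming a hypothetical node URL and index name; only the fielddata cache for a single field is cleared here:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // hypothetical endpoint

// Clear only the fielddata cache, and only for the session_id field.
await client.indices.clearCache({
  index: 'my-index-000001',
  fielddata: true,
  fields: 'session_id'
})
```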
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-clearcache.html | Elasticsearch API documentation} */ - async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise - async clearCache (this: That, params?: T.IndicesClearCacheRequest | TB.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.clear_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -205,20 +1128,27 @@ export default class Indices { * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. 
* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-clone-index.html | Elasticsearch API documentation} */ - async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise - async clone (this: That, params: T.IndicesCloneRequest | TB.IndicesCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.clone'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -228,9 +1158,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -250,18 +1186,31 @@ export default class Indices { * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
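A minimal close/reopen sketch under the same assumptions as the earlier examples (hypothetical node URL and index name):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // hypothetical endpoint

// Close the index; ignore_unavailable avoids an error if it is missing.
await client.indices.close({ index: 'my-index-000001', ignore_unavailable: true })

// Reopen it later; shards go through the normal recovery process.
await client.indices.open({ index: 'my-index-000001' })
```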
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-close.html | Elasticsearch API documentation} */ - async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse> - async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>> - async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse> - async close (this: That, params: T.IndicesCloseRequest | TB.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const querystring: Record<string, any> = {} - const body = undefined + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['indices.close'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -282,20 +1231,27 @@ export default class Indices { * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
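The `wait_for_active_shards` behavior described above can be exercised directly from the client; a sketch with illustrative names and settings:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.indices.create({
  index: 'my-index',
  settings: { number_of_shards: 1, number_of_replicas: 1 },
  mappings: { properties: { '@timestamp': { type: 'date' } } },
  wait_for_active_shards: 'all' // wait for replicas too, not just primaries
})
console.log(res.acknowledged, res.shards_acknowledged)
```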
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-create-index.html | Elasticsearch API documentation} */ - async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise - async create (this: That, params: T.IndicesCreateRequest | TB.IndicesCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -305,9 +1261,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -326,18 +1288,31 @@ export default class Indices { * Create a data stream. You must have a matching index template with data stream enabled. 
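A minimal sketch of creating a data stream (the stream name is a placeholder and assumes a matching template already exists):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Assumes an index template with `data_stream: {}` already matches this name
await client.indices.createDataStream({ name: 'logs-myapp-default' })
```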
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-create-data-stream.html | Elasticsearch API documentation} */ - async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise - async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.create_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -358,25 +1333,35 @@ export default class Indices { * Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/migrate-data-stream.html | Elasticsearch API documentation} */ - async createFrom (this: That, params: T.IndicesCreateFromRequest | TB.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createFrom (this: That, params: T.IndicesCreateFromRequest | TB.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createFrom (this: That, params: T.IndicesCreateFromRequest | TB.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise - async createFrom (this: That, params: T.IndicesCreateFromRequest | TB.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['source', 'dest'] - const acceptedBody: string[] = ['create_from'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create_from'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -396,19 +1381,32 @@ export default class Indices { * Get data stream stats. Get statistics for one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-stream-stats-api.html | Elasticsearch API documentation} */ - async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise - async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.data_streams_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -436,18 +1434,31 @@ export default class Indices { * Delete indices. 
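A hedged sketch of deleting an index; the `ignore` transport option shown here is an assumption about how a caller might tolerate a missing index:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// `ignore: [404]` treats a missing index as a non-error response
await client.indices.delete({ index: 'my-old-index' }, { ignore: [404] })
```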
Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-delete-index.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.IndicesDeleteRequest | TB.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -468,18 +1479,31 @@ export default class Indices { * Delete an alias. Removes a data stream or index from an alias. 
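A minimal sketch of removing an alias (index and alias names are placeholders; the underlying index is untouched):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Both path parameters are required; only the alias is removed
await client.indices.deleteAlias({ index: 'my-index', name: 'my-alias' })
```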
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-delete-alias.html | Elasticsearch API documentation} */ - async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise - async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest | TB.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const querystring: Record = {} - const body = undefined + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -508,18 +1532,31 @@ export default class Indices { * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. 
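A minimal sketch of removing a lifecycle from a data stream (the name is illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The data stream keeps its data; it is simply no longer lifecycle-managed
await client.indices.deleteDataLifecycle({ name: 'logs-myapp-default' })
```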
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams-delete-lifecycle.html | Elasticsearch API documentation} */ - async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise - async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest | TB.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -540,18 +1577,31 @@ export default class Indices { * Delete data streams. Deletes one or more data streams and their backing indices. 
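A minimal sketch of deleting a data stream (placeholder name):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Deletes the stream and all of its backing indices
await client.indices.deleteDataStream({ name: 'logs-myapp-default' })
```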
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-delete-data-stream.html | Elasticsearch API documentation} */ - async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise - async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -572,19 +1622,32 @@ export default class Indices { * Deletes the data stream options of the selected data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/index.html | Elasticsearch API documentation} */ - async deleteDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async deleteDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async deleteDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -604,18 +1667,31 @@ export default class Indices { * Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-delete-template.html | Elasticsearch API documentation} */ - async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse> - async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>> - async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse> - async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest | TB.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['name'] - const querystring: Record<string, any> = {} - const body = undefined + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesDeleteIndexTemplateResponse> + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesDeleteIndexTemplateResponse, unknown>> + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesDeleteIndexTemplateResponse> + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_index_template'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -636,18 +1712,31 @@ export default class Indices { * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
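A minimal sketch of deleting a legacy (v1) template; the template name is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Legacy templates only; composable templates use deleteIndexTemplate instead
await client.indices.deleteTemplate({ name: 'legacy-logs-template' })
```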
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-delete-template-v1.html | Elasticsearch API documentation} */ - async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise - async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest | TB.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -668,18 +1757,31 @@ export default class Indices { * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. 
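A hedged sketch of the disk usage analysis (index name is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The analysis is expensive and must be opted into explicitly
const res = await client.indices.diskUsage({
  index: 'my-index',
  run_expensive_tasks: true
})
console.log(res)
```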
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-disk-usage.html | Elasticsearch API documentation} */ - async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> - async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise - async diskUsage (this: That, params: T.IndicesDiskUsageRequest | TB.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.disk_usage'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -700,25 +1802,35 @@ export default class Indices { * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-downsample-data-stream.html | Elasticsearch API documentation} */ - async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise - async downsample (this: That, params: T.IndicesDownsampleRequest | TB.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target_index'] - const acceptedBody: string[] = ['config'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.downsample'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -738,18 +1850,31 @@ export default class Indices { * Check indices. Check if one or more indices, index aliases, or data streams exist. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-exists.html | Elasticsearch API documentation} */ - async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise - async exists (this: That, params: T.IndicesExistsRequest | TB.IndicesExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.exists'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -770,18 +1895,31 @@ export default class Indices { * Check aliases. Check if one or more data stream or index aliases exist. 
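A minimal sketch of the alias existence check (alias name is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Resolves to a boolean rather than a response body
const exists = await client.indices.existsAlias({ name: 'my-alias' })
console.log(exists)
```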
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias | Elasticsearch API documentation} */ - async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise - async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] - const querystring: Record = {} - const body = undefined + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -810,18 +1948,31 @@ export default class Indices { * Check index templates. Check whether index templates exist. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template | Elasticsearch API documentation} */ - async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise - async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -842,18 +1993,31 @@ export default class Indices { * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-template-exists-v1.html | Elasticsearch API documentation} */ - async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise - async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest | TB.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -874,18 +2038,31 @@ export default class Indices { * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
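A minimal sketch of inspecting lifecycle status; the backing index name below is illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.indices.explainDataLifecycle({
  index: '.ds-logs-myapp-default-2099.03.07-000001'
})
console.log(res.indices)
```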
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams-explain-lifecycle.html | Elasticsearch API documentation} */ - async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise - async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest | TB.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.explain_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -906,18 +2083,31 @@ export default class Indices { * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. 
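A hedged sketch of fetching field usage stats; the `fields` filter shown is an assumption about narrowing the report:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// `fields` limits the report to matching field patterns
const res = await client.indices.fieldUsageStats({
  index: 'my-index',
  fields: 'my-field*'
})
console.log(res)
```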
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/field-usage-stats.html | Elasticsearch API documentation} */ - async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise - async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest | TB.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.field_usage_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -938,19 +2128,32 @@ export default class Indices { * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. 
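A minimal sketch of triggering a flush manually (index name is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// `wait_if_ongoing` blocks until any in-flight flush on the shard finishes
await client.indices.flush({ index: 'my-index', wait_if_ongoing: true })
```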
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-flush.html | Elasticsearch API documentation} */ - async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse> - async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>> - async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<T.IndicesFlushResponse> - async flush (this: That, params?: T.IndicesFlushRequest | TB.IndicesFlushRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const querystring: Record<string, any> = {} - const body = undefined + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesFlushResponse> + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesFlushResponse, unknown>> + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise<T.IndicesFlushResponse> + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['indices.flush'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -978,19 +2181,32 @@ export default class Indices { * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
**Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the `force_merge` threadpool. By default, each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-forcemerge.html | Elasticsearch API documentation} */ - async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse> - async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>> - async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<T.IndicesForcemergeResponse> - async forcemerge (this: That, params?: T.IndicesForcemergeRequest | TB.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['index'] - const querystring: Record<string, any> = {} - const body = undefined + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesForcemergeResponse> + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesForcemergeResponse, unknown>> + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<T.IndicesForcemergeResponse> + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['indices.forcemerge'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1018,18 +2234,31 @@ export default class Indices { * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-index.html | Elasticsearch API documentation} */ - async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.IndicesGetRequest | TB.IndicesGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1050,19 +2279,32 @@ export default class Indices { * Get aliases. Retrieves information for one or more data stream or index aliases. 
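A minimal sketch of retrieving aliases and iterating the per-index response map (index name is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.indices.getAlias({ index: 'my-index' })
for (const [index, info] of Object.entries(res)) {
  console.log(index, Object.keys(info.aliases))
}
```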
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-alias.html | Elasticsearch API documentation} */ - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise - async getAlias (this: That, params?: T.IndicesGetAliasRequest | TB.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] - const querystring: Record = {} - const body = undefined + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1097,18 +2339,31 @@ export default class Indices { * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams. 
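A minimal sketch, reusing the `client` from the earlier example (`my-data-stream` is a placeholder name):
```
// Fetch the lifecycle configuration of a single data stream
const lifecycle = await client.indices.getDataLifecycle({ name: 'my-data-stream' })
console.log(lifecycle)
```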
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams-get-lifecycle.html | Elasticsearch API documentation} */ - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise - async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1129,19 +2384,32 @@ export default class Indices { * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. 
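For illustration, assuming the same initialized `client`:
```
// The stats endpoint takes no required parameters
const stats = await client.indices.getDataLifecycleStats()
console.log(stats)
```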
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams-get-lifecycle-stats.html | Elasticsearch API documentation} */ - async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise - async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest | TB.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1159,19 +2427,32 @@ export default class Indices { * Get data streams. Get information about one or more data streams. 
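A hedged sketch with the same assumed `client` (omit `name` to list every data stream):
```
const streams = await client.indices.getDataStream({ name: 'my-data-stream' })
console.log(streams.data_streams)
```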
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-data-stream.html | Elasticsearch API documentation} */ - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise - async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1199,19 +2480,32 @@ export default class Indices { * Returns the data stream options of the selected data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/index.html | Elasticsearch API documentation} */ - async getDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -1231,19 +2525,32 @@ export default class Indices { * Gets a data stream's settings * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams.html | Elasticsearch API documentation} */ - async getDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async getDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -1263,18 +2570,31 @@ export default class Indices { * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. 
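For instance, a small sketch (assumed `client`, with placeholder index and field names):
```
// Ask only for the mapping of user_id instead of the full index mapping
const fieldMapping = await client.indices.getFieldMapping({
  index: 'my-index',
  fields: 'user_id'
})
console.log(fieldMapping)
```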
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-field-mapping.html | Elasticsearch API documentation} */ - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise - async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest | TB.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields', 'index'] - const querystring: Record = {} - const body = undefined + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_field_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1303,19 +2623,32 @@ export default class Indices { * Get index templates. Get information about one or more index templates. 
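A rough sketch under the same assumptions (`my-template` is a placeholder; omit `name` to list all composable templates):
```
const templates = await client.indices.getIndexTemplate({ name: 'my-template' })
console.log(templates.index_templates)
```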
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-template.html | Elasticsearch API documentation} */ - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise - async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest | TB.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1343,19 +2676,32 @@ export default class Indices { * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. 
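For example, with the assumed `client`:
```
// Omit `index` to fetch the mappings of every index you can access
const mapping = await client.indices.getMapping({ index: 'my-index' })
console.log(mapping)
```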
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-mapping.html | Elasticsearch API documentation} */ - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise - async getMapping (this: That, params?: T.IndicesGetMappingRequest | TB.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1383,18 +2729,31 @@ export default class Indices { * Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. 
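A sketch, assuming the same `client` and a placeholder data stream name:
```
// `index` is the data stream or index whose migration reindex is being tracked
const status = await client.indices.getMigrateReindexStatus({ index: 'my-data-stream' })
console.log(status)
```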
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/migrate-data-stream.html | Elasticsearch API documentation} */ - async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest | TB.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest | TB.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest | TB.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise - async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest | TB.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_migrate_reindex_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1415,19 +2774,32 @@ export default class Indices { * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. 
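For illustration (assumed `client`; the setting name shown is just an example):
```
// The optional `name` path parameter narrows the response to matching settings
const settings = await client.indices.getSettings({
  index: 'my-index',
  name: 'index.number_of_replicas'
})
console.log(settings)
```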
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-settings.html | Elasticsearch API documentation} */ - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise - async getSettings (this: That, params?: T.IndicesGetSettingsRequest | TB.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const querystring: Record = {} - const body = undefined + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1462,19 +2834,32 @@ export default class Indices { * Get legacy index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
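A hedged sketch with the assumed `client` (`my-legacy-template` is a placeholder):
```
// Legacy v1 templates; prefer getIndexTemplate for composable templates
const legacy = await client.indices.getTemplate({ name: 'my-legacy-template' })
console.log(legacy)
```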
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-get-template-v1.html | Elasticsearch API documentation} */ - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise - async getTemplate (this: That, params?: T.IndicesGetTemplateRequest | TB.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1502,25 +2887,35 @@ export default class Indices { * Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/migrate-data-stream.html | Elasticsearch API documentation} */ - async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest | TB.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest | TB.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> - async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest | TB.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise - async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest | TB.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['reindex'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.migrate_reindex'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1536,18 +2931,31 @@ export default class Indices { * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. 
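As a sketch (assumed `client`; `my-time-series-alias` stands in for an alias that meets the criteria above):
```
await client.indices.migrateToDataStream({ name: 'my-time-series-alias' })
```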
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream | Elasticsearch API documentation} */ - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise - async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest | TB.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.migrate_to_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1568,20 +2976,27 @@ export default class Indices { * Update data streams. Performs one or more data stream modification actions in a single atomic operation. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream | Elasticsearch API documentation} */ - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise - async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest | TB.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.modify_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1591,9 +3006,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1609,18 +3030,31 @@ export default class Indices { * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. 
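A minimal sketch under the same `client` assumption:
```
// Reopen a closed index; an error is thrown if it does not exist
await client.indices.open({ index: 'my-index' })
```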
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-open-close.html | Elasticsearch API documentation} */ - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise - async open (this: That, params: T.IndicesOpenRequest | TB.IndicesOpenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.open'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1641,18 +3075,31 @@ export default class Indices { * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. 
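For illustration, with the assumed `client` and a placeholder replicated stream name:
```
await client.indices.promoteDataStream({ name: 'my-replicated-stream' })
```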
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream | Elasticsearch API documentation} */ - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise - async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest | TB.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.promote_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1673,20 +3120,27 @@ export default class Indices { * Create or update an alias. Adds a data stream or index to an alias. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias | Elasticsearch API documentation} */ - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise - async putAlias (this: That, params: T.IndicesPutAliasRequest | TB.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1696,9 +3150,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1725,20 +3185,27 @@ export default class Indices { * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle | Elasticsearch API documentation} */ - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise - async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest | TB.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1748,9 +3215,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1769,19 +3242,32 @@ export default class Indices { * Updates the data stream options of the selected data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/index.html | Elasticsearch API documentation} */ - async putDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async putDataStreamOptions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamOptions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -1801,19 +3287,32 @@ export default class Indices { * Updates a data stream's settings * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/data-streams.html | Elasticsearch API documentation} */ - async putDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async putDataStreamSettings (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamSettings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -1833,20 +3332,27 @@ export default class Indices { * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
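For instance, a hedged sketch of a simple composable template (all names and values are illustrative; `client` as in the earlier examples):
```
// A template that will apply to any new index matching logs-*
await client.indices.putIndexTemplate({
  name: 'my-logs-template',
  index_patterns: ['logs-*'],
  priority: 100,
  template: {
    settings: { number_of_replicas: 1 }
  }
})
```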
**Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template | Elasticsearch API documentation} */ - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise - async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest | TB.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1856,9 +3362,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1877,20 +3389,27 @@ export default class Indices { * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. 
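A small sketch under the same `client` assumption:
```
// Add a new keyword field to an existing index without reindexing
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    user_name: { type: 'keyword' }
  }
})
```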
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-put-mapping.html | Elasticsearch API documentation} */ - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise - async putMapping (this: That, params: T.IndicesPutMappingRequest | TB.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1900,9 +3419,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1921,25 +3446,35 @@ export default class Indices { * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. 
@@ -1921,25 +3446,35 @@ export default class Indices {
   /**
     * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot notation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-update-settings.html | Elasticsearch API documentation}
     */
-  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>
-  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutSettingsResponse, unknown>>
-  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutSettingsResponse>
-  async putSettings (this: That, params: T.IndicesPutSettingsRequest | TB.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const acceptedBody: string[] = ['settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutSettingsResponse>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutSettingsResponse, unknown>>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutSettingsResponse>
+  async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.put_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+    let body: any = params.body ?? undefined
 
     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
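The request-body shapes listed in the description above are equivalent; a sketch (illustrative only; `client` and the index name are hypothetical):

// All three calls set the same dynamic setting.
await client.indices.putSettings({ index: 'my-index', settings: { number_of_replicas: 1 } })
await client.indices.putSettings({ index: 'my-index', settings: { index: { number_of_replicas: 1 } } })
await client.indices.putSettings({ index: 'my-index', settings: { 'index.number_of_replicas': 1 } })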
@@ -1965,20 +3500,27 @@ export default class Indices {
   /**
     * Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-templates-v1.html | Elasticsearch API documentation}
     */
-  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>
-  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutTemplateResponse, unknown>>
-  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutTemplateResponse>
-  async putTemplate (this: That, params: T.IndicesPutTemplateRequest | TB.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutTemplateResponse>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutTemplateResponse, unknown>>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesPutTemplateResponse>
+  async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.put_template']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     for (const key in params) {
@@ -1988,9 +3530,15 @@ export default class Indices {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
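A sketch of the `order`-based merging described above (illustrative only; template and index names are hypothetical):

// Both legacy templates match `logs-*`; the higher order (1) is
// applied last and overrides the lower order (0) where they overlap.
await client.indices.putTemplate({
  name: 'logs-base',
  index_patterns: ['logs-*'],
  order: 0,
  settings: { number_of_shards: 1 }
})
await client.indices.putTemplate({
  name: 'logs-override',
  index_patterns: ['logs-*'],
  order: 1,
  settings: { number_of_shards: 2 }
})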
@@ -2009,19 +3557,32 @@ export default class Indices {
   /**
     * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node, the information about the original recovery will not be shown in the recovery API.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-recovery.html | Elasticsearch API documentation}
     */
-  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRecoveryResponse>
-  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRecoveryResponse, unknown>>
-  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<T.IndicesRecoveryResponse>
-  async recovery (this: That, params?: T.IndicesRecoveryRequest | TB.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRecoveryResponse>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRecoveryResponse, unknown>>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<T.IndicesRecoveryResponse>
+  async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.recovery']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2049,19 +3610,32 @@ export default class Indices {
   /**
     * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-refresh.html | Elasticsearch API documentation}
     */
-  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRefreshResponse>
-  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRefreshResponse, unknown>>
-  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<T.IndicesRefreshResponse>
-  async refresh (this: That, params?: T.IndicesRefreshRequest | TB.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRefreshResponse>
+  async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRefreshResponse, unknown>>
+  async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<T.IndicesRefreshResponse>
+  async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.refresh']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
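A sketch of the `refresh=wait_for` recommendation above (illustrative only; `client` and the names are hypothetical):

// Prefer waiting for the periodic refresh over an explicit one:
await client.index({ index: 'my-index', document: { title: 'hello' }, refresh: 'wait_for' })
// An explicit refresh remains available when it is genuinely required:
await client.indices.refresh({ index: 'my-index' })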
@@ -2089,18 +3663,31 @@ export default class Indices {
   /**
     * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-reload-analyzers.html | Elasticsearch API documentation}
     */
-  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesReloadSearchAnalyzersResponse>
-  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesReloadSearchAnalyzersResponse, unknown>>
-  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<T.IndicesReloadSearchAnalyzersResponse>
-  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest | TB.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesReloadSearchAnalyzersResponse>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesReloadSearchAnalyzersResponse, unknown>>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<T.IndicesReloadSearchAnalyzersResponse>
+  async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.reload_search_analyzers']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2121,19 +3708,32 @@ export default class Indices {
   /**
     * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-resolve-cluster-api.html | Elasticsearch API documentation}
     */
-  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse>
-  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>>
-  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse>
-  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse>
+  async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.resolve_cluster']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
         querystring[key] = params[key]
       }
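A sketch of the pre-search check described above (illustrative only; the expression mirrors the `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` example, and the response fields follow the description):

const clusters = await client.indices.resolveCluster({ name: 'my-index-*,cluster*:my-index-*' })
// Each entry reports `connected`, `skip_unavailable`, whether anything
// matched, and an `error` field when the search would likely fail.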
@@ -2161,18 +3761,31 @@ export default class Indices {
   /**
     * Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-resolve-index-api.html | Elasticsearch API documentation}
     */
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
-  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest | TB.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveIndexResponse, unknown>>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveIndexResponse>
+  async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.resolve_index']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2193,20 +3806,27 @@ export default class Indices {
   /**
     * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-rollover-index.html | Elasticsearch API documentation}
     */
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
-  async rollover (this: That, params: T.IndicesRolloverRequest | TB.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['alias', 'new_index']
-    const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesRolloverResponse, unknown>>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<T.IndicesRolloverResponse>
+  async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.rollover']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     for (const key in params) {
@@ -2216,9 +3836,15 @@ export default class Indices {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
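A sketch of the increment behavior described above (illustrative only; alias and index names are hypothetical):

// With `my-alias` currently writing to `my-index-000001`, a rollover
// creates `my-index-000002` (the counter is six digits, zero-padded).
const res = await client.indices.rollover({ alias: 'my-alias' })
// res.old_index === 'my-index-000001', res.new_index === 'my-index-000002'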
@@ -2245,19 +3871,32 @@ export default class Indices {
   /**
     * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-segments.html | Elasticsearch API documentation}
     */
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
-  async segments (this: That, params?: T.IndicesSegmentsRequest | TB.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSegmentsResponse, unknown>>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<T.IndicesSegmentsResponse>
+  async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.segments']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2285,19 +3924,32 @@ export default class Indices {
   /**
     * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-shards-stores.html | Elasticsearch API documentation}
     */
-  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShardStoresResponse>
-  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShardStoresResponse, unknown>>
-  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<T.IndicesShardStoresResponse>
-  async shardStores (this: That, params?: T.IndicesShardStoresRequest | TB.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShardStoresResponse>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShardStoresResponse, unknown>>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<T.IndicesShardStoresResponse>
+  async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.shard_stores']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2325,20 +3977,27 @@ export default class Indices {
   /**
     * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-shrink-index.html | Elasticsearch API documentation}
     */
-  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShrinkResponse>
-  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShrinkResponse, unknown>>
-  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<T.IndicesShrinkResponse>
-  async shrink (this: That, params: T.IndicesShrinkRequest | TB.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index', 'target']
-    const acceptedBody: string[] = ['aliases', 'settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesShrinkResponse>
+  async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesShrinkResponse, unknown>>
+  async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<T.IndicesShrinkResponse>
+  async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.shrink']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }
 
     for (const key in params) {
@@ -2348,9 +4007,15 @@ export default class Indices {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
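A sketch of the shard-factor rule above (illustrative only; names are hypothetical, and the source index is assumed to be read-only with a copy of every shard on one node):

// An 8-shard source can shrink to 4, 2, or 1 primary shards;
// 2 is a valid factor of 8, so this request is accepted.
await client.indices.shrink({
  index: 'my-source-index', // 8 primary shards
  target: 'my-target-index',
  settings: { 'index.number_of_shards': 2 }
})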
@@ -2370,18 +4035,31 @@ export default class Indices {
   /**
     * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-simulate-index.html | Elasticsearch API documentation}
     */
-  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateIndexTemplateResponse>
-  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateIndexTemplateResponse, unknown>>
-  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
-  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest | TB.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateIndexTemplateResponse>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateIndexTemplateResponse, unknown>>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateIndexTemplateResponse>
+  async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.simulate_index_template']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2402,20 +4080,27 @@ export default class Indices {
   /**
     * Simulate an index template. Get the index configuration that would be applied by a particular index template.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-simulate-template.html | Elasticsearch API documentation}
     */
-  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateTemplateResponse>
-  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateTemplateResponse, unknown>>
-  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateTemplateResponse>
-  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest | TB.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSimulateTemplateResponse>
+  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSimulateTemplateResponse, unknown>>
+  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<T.IndicesSimulateTemplateResponse>
+  async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.simulate_template']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     params = params ?? {}
@@ -2426,9 +4111,15 @@ export default class Indices {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -2454,20 +4145,27 @@ export default class Indices {
   /**
     * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. You can make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-split-index.html | Elasticsearch API documentation}
     */
-  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse>
-  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSplitResponse, unknown>>
-  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<T.IndicesSplitResponse>
-  async split (this: That, params: T.IndicesSplitRequest | TB.IndicesSplitRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index', 'target']
-    const acceptedBody: string[] = ['aliases', 'settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesSplitResponse>
+  async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesSplitResponse, unknown>>
+  async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise<T.IndicesSplitResponse>
+  async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.split']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }
 
     for (const key in params) {
@@ -2477,9 +4175,15 @@ export default class Indices {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
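A sketch of the routing-shards arithmetic above (illustrative only; names are hypothetical):

// A 5-shard index with index.number_of_routing_shards = 30 (5 x 2 x 3)
// can be split by a factor of 2 or 3, e.g. 5 -> 10 here.
await client.indices.split({
  index: 'my_source_index',
  target: 'my_target_index',
  settings: { 'index.number_of_shards': 10 }
})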
@@ -2499,19 +4203,32 @@ export default class Indices {
   /**
     * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/indices-stats.html | Elasticsearch API documentation}
     */
-  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesStatsResponse>
-  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesStatsResponse, unknown>>
-  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesStatsResponse>
-  async stats (this: That, params?: T.IndicesStatsRequest | TB.IndicesStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['metric', 'index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesStatsResponse>
+  async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesStatsResponse, unknown>>
+  async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise<T.IndicesStatsResponse>
+  async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2546,18 +4263,31 @@ export default class Indices {
   /**
     * Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
     * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/unfreeze-index-api.html | Elasticsearch API documentation}
     */
-  async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUnfreezeResponse>
-  async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUnfreezeResponse, unknown>>
-  async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise<T.IndicesUnfreezeResponse>
-  async unfreeze (this: That, params: T.IndicesUnfreezeRequest | TB.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['index']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUnfreezeResponse>
+  async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUnfreezeResponse, unknown>>
+  async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise<T.IndicesUnfreezeResponse>
+  async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['indices.unfreeze']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2578,20 +4308,27 @@ export default class Indices {
   /**
     * Create or update an alias. Adds a data stream or index to an alias.
     * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases | Elasticsearch API documentation}
     */
-  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUpdateAliasesResponse>
-  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUpdateAliasesResponse, unknown>>
-  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<T.IndicesUpdateAliasesResponse>
-  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest | TB.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['actions']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesUpdateAliasesResponse>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesUpdateAliasesResponse, unknown>>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<T.IndicesUpdateAliasesResponse>
+  async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['indices.update_aliases']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     params = params ?? {}
@@ -2602,9 +4339,15 @@ export default class Indices {
         body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
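A sketch of the `actions` body key the new acceptedParams entry declares (illustrative only; names are hypothetical):

// Atomically swap an alias from the old write index to the new one.
await client.indices.updateAliases({
  actions: [
    { remove: { index: 'my-index-000001', alias: 'my-alias' } },
    { add: { index: 'my-index-000002', alias: 'my-alias' } }
  ]
})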
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2644,9 +4394,15 @@ export default class Indices { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 30a978415..53d395a48 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,38 +21,461 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Inference { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'inference.chat_completion_unified': { + path: [ + 'inference_id' + ], + body: [ + 'chat_completion_request' + ], + query: [ + 'timeout' + ] + }, + 'inference.completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.delete': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [ + 'dry_run', + 'force' + ] + }, + 'inference.get': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [] + }, + 'inference.inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'query', + 'input', + 'input_type', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_alibabacloud': { + path: [ + 'task_type', + 'alibabacloud_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_amazonbedrock': { + path: [ + 'task_type', + 'amazonbedrock_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 
'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_amazonsagemaker': { + path: [ + 'task_type', + 'amazonsagemaker_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_anthropic': { + path: [ + 'task_type', + 'anthropic_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_azureaistudio': { + path: [ + 'task_type', + 'azureaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_azureopenai': { + path: [ + 'task_type', + 'azureopenai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_cohere': { + path: [ + 'task_type', + 'cohere_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_custom': { + path: [ + 'task_type', + 'custom_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_deepseek': { + path: [ + 'task_type', + 'deepseek_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_elasticsearch': { + path: [ + 'task_type', + 'elasticsearch_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_elser': { + path: [ + 'task_type', + 'elser_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_googleaistudio': { + path: [ + 'task_type', + 'googleaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_googlevertexai': { + path: [ + 'task_type', + 'googlevertexai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_hugging_face': { + path: [ + 'task_type', + 'huggingface_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_jinaai': { + path: [ + 'task_type', + 'jinaai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_mistral': { + path: [ + 'task_type', + 'mistral_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_openai': { + path: [ + 'task_type', + 'openai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_voyageai': { + path: [ + 'task_type', + 'voyageai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_watsonx': { + path: [ + 'task_type', + 'watsonx_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.rerank': { + path: [ 
+ 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.sparse_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.stream_completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.text_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.update': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'inference_config' + ], + query: [] + } + } } /** * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/chat-completion-inference-api.html | Elasticsearch API documentation} */ - async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise - async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest | TB.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['chat_completion_request'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.chat_completion_unified'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -85,20 +494,27 @@ export default class Inference { * Perform completion inference on the service * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation} */ - async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> - async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptions): Promise - async completion (this: That, params: T.InferenceCompletionRequest | TB.InferenceCompletionRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['input', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -108,9 +524,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
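As a usage sketch for the refactored method (the endpoint id and message are made up, and it assumes a `chat_completion` endpoint backed by a supported service already exists):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// `chat_completion_request` is the single declared body key, so the loop
// assigns it directly as the request body.
const response = await client.inference.chatCompletionUnified({
  inference_id: 'my-chat-endpoint', // hypothetical endpoint id
  chat_completion_request: {
    messages: [{ role: 'user', content: 'Say hello in one word.' }]
  }
})
```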
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -129,18 +551,31 @@ export default class Inference { * Delete an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-inference-api.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.InferenceDeleteRequest | TB.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -169,19 +604,32 @@ export default class Inference { * Get an inference endpoint * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-inference-api.html | Elasticsearch API documentation} */ - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params?: T.InferenceGetRequest | TB.InferenceGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const querystring: Record = {} - const body = undefined + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
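A comparable sketch for `completion`, reusing the `client` instance from the earlier sketch; `input` and `task_settings` are the declared body keys, and `timeout` is the endpoint-specific query key:

```ts
const response = await client.inference.completion({
  inference_id: 'my-completion-endpoint', // hypothetical endpoint id
  input: 'What is Elasticsearch?',
  timeout: '30s' // routed to the querystring by the loop above
})
```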
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -213,20 +661,27 @@ export default class Inference { * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation} */ - async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise - async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['query', 'input', 'input_type', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.inference'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
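Because `inference.delete` and `inference.get` declare empty `body` lists, every accepted non-path key is a query parameter. A hedged sketch with a hypothetical endpoint id:

```ts
await client.inference.get({ task_type: 'text_embedding', inference_id: 'my-endpoint' })

await client.inference.delete({
  task_type: 'text_embedding',
  inference_id: 'my-endpoint',
  dry_run: true // query parameter per the table above; no deletion is performed
})
```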
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -236,9 +691,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -265,25 +726,35 @@ export default class Inference { * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`chat_completion`, `completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * JinaAI (`rerank`, `text_embedding`) * Llama (`chat_completion`, `completion`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`rerank`, `text_embedding`) * Watsonx inference integration (`text_embedding`) * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-inference-api.html | Elasticsearch API documentation} */ - async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise - async put (this: That, params: T.InferencePutRequest | TB.InferencePutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['inference_config'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
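A usage sketch for the task-generic `inference` method (endpoint id hypothetical); `input` is a declared body key, so an array of strings is passed through in the request body:

```ts
const result = await client.inference.inference({
  task_type: 'text_embedding',
  inference_id: 'my-embedding-endpoint', // hypothetical endpoint id
  input: ['first passage to embed', 'second passage to embed']
})
```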
undefined + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -310,20 +781,27 @@ export default class Inference { * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-alibabacloud-ai-search.html | Elasticsearch API documentation} */ - async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise - async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest | TB.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'alibabacloud_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_alibabacloud'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
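Note that `inference.put` takes the whole endpoint definition as a single `inference_config` body key, and the loop assigns it directly (`body = params[key]`) rather than merging keys. A sketch with illustrative service settings for the `elasticsearch` service:

```ts
await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-e5-endpoint', // hypothetical endpoint id
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.multilingual-e5-small', // illustrative built-in model
      num_allocations: 1,
      num_threads: 1
    }
  }
})
```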
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -333,9 +811,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -355,20 +839,27 @@ export default class Inference { * Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-amazon-bedrock.html | Elasticsearch API documentation} */ - async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise - async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest | TB.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'amazonbedrock_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_amazonbedrock'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -378,9 +869,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -400,20 +897,27 @@ export default class Inference { * Create an Amazon SageMaker inference endpoint. Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker | Elasticsearch API documentation} */ - async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise - async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest | TB.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'amazonsagemaker_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_amazonsagemaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
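Returning to `put_amazonbedrock`: since the access and secret keys cannot be retrieved or changed after creation (see the note above), they are supplied once when the endpoint is created. A hedged sketch with placeholder credentials and an illustrative model id:

```ts
await client.inference.putAmazonbedrock({
  task_type: 'text_embedding',
  amazonbedrock_inference_id: 'my-bedrock-endpoint', // hypothetical endpoint id
  service: 'amazonbedrock',
  service_settings: {
    access_key: '<aws-access-key>', // placeholders; not retrievable later
    secret_key: '<aws-secret-key>',
    region: 'us-east-1',
    model: 'amazon.titan-embed-text-v2:0' // illustrative model id
  }
})
```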
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -423,9 +927,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -445,20 +955,27 @@ export default class Inference { * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-anthropic.html | Elasticsearch API documentation} */ - async putAnthropic (this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAnthropic (this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAnthropic (this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise - async putAnthropic (this: That, params: T.InferencePutAnthropicRequest | TB.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'anthropic_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_anthropic'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -468,9 +985,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -490,20 +1013,27 @@ export default class Inference { * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-azure-ai-studio.html | Elasticsearch API documentation} */ - async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise - async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest | TB.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'azureaistudio_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -513,9 +1043,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -535,20 +1071,27 @@ export default class Inference { * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. 
The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-azure-openai.html | Elasticsearch API documentation} */ - async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise - async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest | TB.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'azureopenai_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureopenai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -558,9 +1101,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -580,20 +1129,27 @@ export default class Inference { * Create a Cohere inference endpoint. 
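A sketch for the Azure OpenAI variant described above; the resource name, deployment id, and API version are placeholders to be taken from your own Azure deployment:

```ts
await client.inference.putAzureopenai({
  task_type: 'completion',
  azureopenai_inference_id: 'my-azure-endpoint', // hypothetical endpoint id
  service: 'azureopenai',
  service_settings: {
    api_key: '<azure-api-key>',          // placeholder
    resource_name: 'my-resource',        // placeholder resource name
    deployment_id: 'my-gpt4-deployment', // placeholder deployment id
    api_version: '2024-02-01'            // illustrative API version
  }
})
```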
Create an inference endpoint to perform an inference task with the `cohere` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-cohere.html | Elasticsearch API documentation} */ - async putCohere (this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putCohere (this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putCohere (this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptions): Promise - async putCohere (this: That, params: T.InferencePutCohereRequest | TB.InferencePutCohereRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'cohere_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_cohere'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -603,9 +1159,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -625,20 +1187,27 @@ export default class Inference { * Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. 
In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}` the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes. Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation} */ - async putCustom (this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putCustom (this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putCustom (this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptions): Promise - async putCustom (this: That, params: T.InferencePutCustomRequest | TB.InferencePutCustomRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'custom_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_custom'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
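The REST example in the doc comment above translates to the client like this; all values are copied from or modeled on that example, and the truncated URL is kept as-is:

```ts
await client.inference.putCustom({
  task_type: 'text_embedding',
  custom_inference_id: 'test-text-embedding',
  service: 'custom',
  service_settings: {
    secret_parameters: { api_key: '<api-key>' }, // placeholder secret
    url: '...endpoints.huggingface.cloud/v1/embeddings',
    headers: {
      Authorization: 'Bearer ${api_key}', // template resolved server-side, not a JS template literal
      'Content-Type': 'application/json'
    },
    request: '{"input": ${input}}',
    response: {
      json_parser: { text_embeddings: '$.data[*].embedding[*]' }
    }
  }
})
```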
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -648,9 +1217,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -670,20 +1245,27 @@ export default class Inference { * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-deepseek.html | Elasticsearch API documentation} */ - async putDeepseek (this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putDeepseek (this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putDeepseek (this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise - async putDeepseek (this: That, params: T.InferencePutDeepseekRequest | TB.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'deepseek_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_deepseek'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -693,9 +1275,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -715,20 +1303,27 @@ export default class Inference { * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-elasticsearch.html | Elasticsearch API documentation} */ - async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise - async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest | TB.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'elasticsearch_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_elasticsearch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -738,9 +1333,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -760,20 +1361,27 @@ export default class Inference { * Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the `elser` service. You can also deploy ELSER by using the Elasticsearch inference integration. > info > Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings. The API request will automatically download and deploy the ELSER model if it isn't already downloaded. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-elser.html | Elasticsearch API documentation} */ - async putElser (this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putElser (this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putElser (this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptions): Promise - async putElser (this: That, params: T.InferencePutElserRequest | TB.InferencePutElserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'elser_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ?
{ ...userBody } : undefined + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_elser'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -783,9 +1391,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -805,20 +1419,27 @@ export default class Inference { * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-google-ai-studio.html | Elasticsearch API documentation} */ - async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise - async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest | TB.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'googleaistudio_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ?
{ ...userBody } : undefined + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googleaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -828,9 +1449,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -850,20 +1477,27 @@ export default class Inference { * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-google-vertex-ai.html | Elasticsearch API documentation} */ - async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise - async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest | TB.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'googlevertexai_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googlevertexai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -873,9 +1507,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -895,20 +1535,27 @@ export default class Inference { * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use. For Elastic's `text_embedding` task: The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for `text_embedding` task: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` For Elastic's `chat_completion` and `completion` tasks: The selected model must support the `Text Generation` task and expose the OpenAI API. HuggingFace supports both serverless and dedicated endpoints for `Text Generation`. When creating a dedicated endpoint, select the `Text Generation` task. After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes the `/v1/chat/completions` path in the URL. Then, copy the full endpoint URL for use. Recommended models for `chat_completion` and `completion` tasks: * `Mistral-7B-Instruct-v0.2` * `QwQ-32B` * `Phi-3-mini-128k-instruct` For Elastic's `rerank` task: The selected model must support the `sentence-ranking` task and expose the OpenAI API. HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` so far. After the endpoint is initialized, copy the full endpoint URL for use.
Tested models for `rerank` task: * `bge-reranker-base` * `jina-reranker-v1-turbo-en-GGUF` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-hugging-face.html | Elasticsearch API documentation} */ - async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise - async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest | TB.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'huggingface_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_hugging_face'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -918,9 +1565,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -940,20 +1593,27 @@ export default class Inference { * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the .
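Since this section only changes how request parameters are routed, a usage sketch may help. With the flattened request shape this refactor introduces, creating a JinaAI rerank endpoint could look like the following; the node URL, endpoint ID, and the contents of `service_settings` are illustrative assumptions, not values taken from this diff.

import { Client } from '@elastic/elasticsearch'

// Hypothetical connection; auth options omitted for brevity
const client = new Client({ node: 'http://localhost:9200' })

// task_type and jinaai_inference_id are path params; service and
// service_settings are body keys, passed flat per this refactor
await client.inference.putJinaai({
  task_type: 'rerank',
  jinaai_inference_id: 'my-jinaai-rerank', // illustrative ID
  service: 'jinaai',
  service_settings: { api_key: 'JINA_API_KEY_HERE', model_id: 'jina-reranker-v2-base-multilingual' } // assumed shape
})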
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-jinaai.html | Elasticsearch API documentation} */ - async putJinaai (this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putJinaai (this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putJinaai (this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise - async putJinaai (this: That, params: T.InferencePutJinaaiRequest | TB.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'jinaai_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_jinaai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -963,9 +1623,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -985,20 +1651,27 @@ export default class Inference { * Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service. 
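A corresponding sketch for the `mistral` service, reusing the `client` instance from the earlier example; the model name and `service_settings` shape are assumptions.

await client.inference.putMistral({
  task_type: 'text_embedding',
  mistral_inference_id: 'my-mistral-embeddings', // illustrative ID
  service: 'mistral',
  service_settings: { api_key: 'MISTRAL_API_KEY_HERE', model: 'mistral-embed' } // assumed shape
})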
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-mistral.html | Elasticsearch API documentation} */ - async putMistral (this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putMistral (this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putMistral (this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptions): Promise - async putMistral (this: That, params: T.InferencePutMistralRequest | TB.InferencePutMistralRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'mistral_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_mistral'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1008,9 +1681,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1030,20 +1709,27 @@ export default class Inference { * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. 
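For the `openai` service, a completion endpoint might be created as below, again reusing `client`; the model and settings values are illustrative.

await client.inference.putOpenai({
  task_type: 'completion',
  openai_inference_id: 'my-openai-completion', // illustrative ID
  service: 'openai',
  service_settings: { api_key: 'OPENAI_API_KEY_HERE', model_id: 'gpt-4o-mini' } // assumed shape
})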
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-openai.html | Elasticsearch API documentation} */ - async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise - async putOpenai (this: That, params: T.InferencePutOpenaiRequest | TB.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'openai_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_openai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1053,9 +1739,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1075,20 +1767,27 @@ export default class Inference { * Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
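A VoyageAI sketch; per the acceptedParams map in this diff, `task_settings` is also an accepted body key for this method. The settings values below are assumptions.

await client.inference.putVoyageai({
  task_type: 'text_embedding',
  voyageai_inference_id: 'my-voyageai-embeddings', // illustrative ID
  service: 'voyageai',
  service_settings: { model_id: 'voyage-3' }, // assumed shape
  task_settings: { input_type: 'document' } // assumed shape
})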
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-voyageai.html | Elasticsearch API documentation} */ - async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise - async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest | TB.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'voyageai_inference_id'] - const acceptedBody: string[] = ['chunking_settings', 'service', 'service_settings', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_voyageai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1098,9 +1797,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1120,20 +1825,27 @@ export default class Inference { * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. 
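For `watsonxai`, the accepted body keys in this diff are only `service` and `service_settings`. A hedged sketch; every value inside `service_settings` is a placeholder, and the exact field set may differ.

await client.inference.putWatsonx({
  task_type: 'text_embedding',
  watsonx_inference_id: 'my-watsonx-embeddings', // illustrative ID
  service: 'watsonxai',
  service_settings: {
    api_key: 'IBM_CLOUD_API_KEY_HERE', // placeholder
    url: 'https://us-south.ml.cloud.ibm.com', // placeholder region URL
    model_id: 'ibm/slate-30m-english-rtrvr', // assumed model
    project_id: 'PROJECT_ID_HERE', // placeholder
    api_version: '2024-05-02' // assumed version string
  }
})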
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-service-watsonx-ai.html | Elasticsearch API documentation} */ - async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise - async putWatsonx (this: That, params: T.InferencePutWatsonxRequest | TB.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'watsonx_inference_id'] - const acceptedBody: string[] = ['service', 'service_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_watsonx'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1143,9 +1855,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1165,20 +1883,27 @@ export default class Inference { * Perform reranking inference on the service * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation} */ - async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise> - async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptions): Promise - async rerank (this: That, params: T.InferenceRerankRequest | TB.InferenceRerankRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['query', 'input', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.rerank'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1188,9 +1913,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1209,20 +1940,27 @@ export default class Inference { * Perform sparse embedding inference on the service * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation} */ - async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise - async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest | TB.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['input', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.sparse_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1232,9 +1970,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1253,20 +1997,27 @@ export default class Inference { * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/stream-inference-api.html | Elasticsearch API documentation} */ - async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> - async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise - async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest | TB.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['input', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.stream_completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1276,9 +2027,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1297,20 +2054,27 @@ export default class Inference { * Perform text embedding inference on the service * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-inference-api.html | Elasticsearch API documentation} */ - async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise - async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest | TB.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id'] - const acceptedBody: string[] = ['input', 'task_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.text_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1320,9 +2084,15 @@ export default class Inference { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1341,25 +2111,35 @@ export default class Inference { * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. 
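Note that `update` differs from the other methods in this section: its single accepted body key, `inference_config`, replaces the request body wholesale (`body = params[key]`) rather than being merged in. A sketch; the fields inside `inference_config` are assumptions based on the description above.

await client.inference.update({
  inference_id: 'my-openai-completion', // illustrative ID
  inference_config: {
    service_settings: { api_key: 'NEW_OPENAI_API_KEY_HERE' } // assumed: rotating a secret
  }
})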
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/update-inference-api.html | Elasticsearch API documentation} */ - async update (this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async update (this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async update (this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptions): Promise - async update (this: That, params: T.InferenceUpdateRequest | TB.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['inference_config'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.update'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/info.ts b/src/api/api/info.ts index a4634670c..8cc904630 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + info: { + path: [], + body: [], + query: [] + } +} /** * Get cluster info. Get basic build, version, and cluster information. ::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rest-api-root.html | Elasticsearch API documentation} */ -export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise -export default async function InfoApi (this: That, params?: T.InfoRequest | TB.InfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.info + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index e8c193c1e..405c56ae4 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,171 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ingest { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ingest.delete_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.geo_ip_stats': { + path: [], + body: [], + query: [] + }, + 'ingest.get_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'summary' + ] + }, + 'ingest.processor_grok': { + path: [], + body: [], + query: [] + }, + 'ingest.put_geoip_database': { + path: [ + 'id' + ], + body: [ + 'name', + 'maxmind' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_ip_location_database': { + path: [ + 'id' + ], + body: [ + 'configuration' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_pipeline': { + path: [ + 'id' + ], + body: [ + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated' + ], + query: [ + 'master_timeout', + 'timeout', + 'if_version' + ] + }, + 'ingest.simulate': { + path: [ + 'id' + ], + body: [ + 'docs', + 'pipeline' + ], + query: [ + 'verbose' + ] + } + } } /** * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. 
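The ingest methods follow the same routing pattern; for `ingest.delete_geoip_database`, the acceptedParams map above declares `master_timeout` and `timeout` as query keys, so they land in the querystring. A sketch with an illustrative configuration ID:

await client.ingest.deleteGeoipDatabase({
  id: 'my-geoip-config', // illustrative configuration ID
  master_timeout: '30s', // routed to the querystring per acceptedParams
  timeout: '30s'
})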
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} */ - async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise - async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +206,31 @@ export default class Ingest { * Delete IP geolocation database configurations. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-ip-location-database-api.html | Elasticsearch API documentation} */ - async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise - async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest | TB.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,18 +251,31 @@ export default class Ingest { * Delete pipelines. Delete one or more ingest pipelines. 
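A matching sketch for `ingest.delete_pipeline`; the pipeline ID is illustrative.

await client.ingest.deletePipeline({
  id: 'my-ingest-pipeline', // illustrative pipeline ID
  master_timeout: '30s' // query key per the acceptedParams map above
})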
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-pipeline-api.html | Elasticsearch API documentation} */ - async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise - async deletePipeline (this: That, params: T.IngestDeletePipelineRequest | TB.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -144,19 +296,32 @@ export default class Ingest { * Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. 
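Per the acceptedParams map, `ingest.geo_ip_stats` declares no path, body, or query keys, so a call takes no arguments:

const geoipStats = await client.ingest.geoIpStats()
console.log(geoipStats) // download statistics for the managed GeoIP2 databases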
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/geoip-processor.html | Elasticsearch API documentation} */ - async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise - async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest | TB.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.geo_ip_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -174,19 +339,32 @@ export default class Ingest { * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. 
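* @example
* // Illustrative only, assuming a configured `client`; omit `id` to list all
* // configurations, or pass a hypothetical database configuration ID:
* const databases = await client.ingest.getGeoipDatabase({ id: 'my-geoip-config' })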
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} */ - async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise - async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -214,19 +392,32 @@ export default class Ingest { * Get IP geolocation database configurations. 
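* @example
* // A sketch with a configured `client`; 'my-ip-db' is a made-up configuration ID:
* const dbs = await client.ingest.getIpLocationDatabase({ id: 'my-ip-db' })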
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-ip-location-database-api.html | Elasticsearch API documentation} */ - async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise - async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest | TB.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -254,19 +445,32 @@ export default class Ingest { * Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. 
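* @example
* // Hypothetical usage with a configured `client`; omitting `id` returns every pipeline:
* const pipelines = await client.ingest.getPipeline({ id: 'my-pipeline' })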
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-pipeline-api.html | Elasticsearch API documentation} */ - async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise - async getPipeline (this: That, params?: T.IngestGetPipelineRequest | TB.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -294,19 +498,32 @@ export default class Ingest { * Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. 
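* @example
* // Assuming a configured `client`, this lists the built-in grok patterns:
* const { patterns } = await client.ingest.processorGrok()
* console.log(patterns.COMBINEDAPACHELOG) // one of the bundled pattern definitions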
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/grok-processor.html | Elasticsearch API documentation} */ - async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> - async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise - async processorGrok (this: That, params?: T.IngestProcessorGrokRequest | TB.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ingest.processor_grok'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -324,20 +541,27 @@ export default class Ingest { * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} */ - async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise - async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['name', 'maxmind'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -347,9 +571,15 @@ export default class Ingest { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -368,25 +598,35 @@ export default class Ingest { * Create or update an IP geolocation database configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-ip-location-database-api.html | Elasticsearch API documentation} */ - async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise - async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest | TB.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['configuration'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -405,20 +645,27 @@ export default class Ingest { * Create or update a pipeline. Changes made using this API take effect immediately. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ingest.html | Elasticsearch API documentation} */ - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise - async putPipeline (this: That, params: T.IngestPutPipelineRequest | TB.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -428,9 +675,15 @@ export default class Ingest { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -449,20 +702,27 @@ export default class Ingest { * Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. 
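* @example
* // Hedged sketch, assuming a configured `client`; the pipeline ID and sample
* // document are invented for illustration:
* const result = await client.ingest.simulate({
*   id: 'my-pipeline',
*   docs: [{ _source: { message: 'hello world' } }]
* })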
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/simulate-pipeline-api.html | Elasticsearch API documentation} */ - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise - async simulate (this: That, params: T.IngestSimulateRequest | TB.IngestSimulateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['docs', 'pipeline'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.simulate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -472,9 +732,15 @@ export default class Ingest { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index 0c2192e78..e07ccd7e3 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,57 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + knn_search: { + path: [ + 'index' + ], + body: [ + '_source', + 'docvalue_fields', + 'stored_fields', + 'fields', + 'filter', + 'knn' + ], + query: [ + 'routing' + ] + } +} /** * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/knn-search-api.html | Elasticsearch API documentation} */ -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise> +export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.knn_search + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +81,15 @@ export default async function KnnSearchApi (this: That, par body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 5e7763362..854da9ced 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,109 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class License { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'license.delete': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'license.get': { + path: [], + body: [], + query: [ + 'accept_enterprise', + 'local' + ] + }, + 'license.get_basic_status': { + path: [], + body: [], + query: [] + }, + 'license.get_trial_status': { + path: [], + body: [], + query: [] + }, + 'license.post': { + path: [], + body: [ + 'license', + 'licenses' + ], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_basic': { + path: [], + body: [], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_trial': { + path: [], + body: [], + query: [ + 'acknowledge', + 'type', + 'master_timeout' + ] + } + } } /** * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-license.html | Elasticsearch API documentation} */ - async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params?: T.LicenseDeleteRequest | TB.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -78,19 +141,32 @@ export default class License { * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. 
NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-license.html | Elasticsearch API documentation} */ - async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -108,19 +184,32 @@ export default class License { * Get the basic license status.
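* @example
* // Assuming a configured `client`:
* const { eligible_to_start_basic } = await client.license.getBasicStatus()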
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-basic-status.html | Elasticsearch API documentation} */ - async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise - async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest | TB.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.get_basic_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -138,19 +227,32 @@ export default class License { * Get the trial status. 
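* @example
* // Assuming a configured `client`:
* const { eligible_to_start_trial } = await client.license.getTrialStatus()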
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-trial-status.html | Elasticsearch API documentation} */ - async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise - async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest | TB.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.get_trial_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -168,20 +270,27 @@ export default class License { * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. 
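* @example
* // Illustrative call with a configured `client`; `licenseBody` stands in for a
* // license object issued by Elastic and is not defined here:
* const result = await client.license.post({ license: licenseBody, acknowledge: true })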
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-license-post | Elasticsearch API documentation} */ - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise - async post (this: That, params?: T.LicensePostRequest | TB.LicensePostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['license', 'licenses'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['license.post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -192,9 +301,15 @@ export default class License { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -210,19 +325,32 @@ export default class License { * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. 
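* @example
* // Assuming a configured `client`; `acknowledge: true` accepts any feature downgrades:
* const response = await client.license.postStartBasic({ acknowledge: true })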
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/start-basic.html | Elasticsearch API documentation} */ - async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise - async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest | TB.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_basic'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -240,19 +368,32 @@ export default class License { * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. 
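* @example
* // Assuming a configured `client`:
* const response = await client.license.postStartTrial({ acknowledge: true })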
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/start-trial.html | Elasticsearch API documentation} */ - async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise - async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest | TB.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_trial'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index 8c2251b8b..7fcbc31f8 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,75 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Logstash { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'logstash.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.put_pipeline': { + path: [ + 'id' + ], + body: [ + 'pipeline' + ], + query: [] + } + } } /** * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/logstash-api-delete-pipeline.html | Elasticsearch API documentation} */ - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise - async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest | TB.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['logstash.delete_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,19 +110,32 @@ export default class Logstash { * Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. 
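* @example
* // Hypothetical usage, assuming a configured `client`; omit `id` to fetch all pipelines:
* const pipelines = await client.logstash.getPipeline({ id: 'my-logstash-pipeline' })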
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/logstash-api-get-pipeline.html | Elasticsearch API documentation} */ - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise - async getPipeline (this: That, params?: T.LogstashGetPipelineRequest | TB.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['logstash.get_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -120,25 +163,35 @@ export default class Logstash { * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/logstash-api-put-pipeline.html | Elasticsearch API documentation} */ - async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise - async putPipeline (this: That, params: T.LogstashPutPipelineRequest | TB.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['pipeline'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['logstash.put_pipeline'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index fc3d1283f..dd07d95a1 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,61 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mget: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } +} /** * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. 
You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-multi-get.html | Elasticsearch API documentation} */ -export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise> -export default async function MgetApi (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mget + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -66,9 +86,15 @@ export default async function MgetApi (this: That, params?: body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 7e44ccc2f..751c3a5fa 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,68 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Migration { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'migration.deprecations': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'migration.get_feature_upgrade_status': { + path: [], + body: [], + query: [] + }, + 'migration.post_feature_upgrade': { + path: [], + body: [], + query: [] + } + } } /** * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/migration-api-deprecation.html | Elasticsearch API documentation} */ - async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise - async deprecations (this: That, params?: T.MigrationDeprecationsRequest | TB.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['migration.deprecations'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ??
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -88,19 +110,32 @@ export default class Migration { * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/feature-migration-api.html | Elasticsearch API documentation} */ - async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise - async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest | TB.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['migration.get_feature_upgrade_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -118,19 +153,32 @@ export default class Migration { * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. 
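 * @example
 * // A minimal usage sketch, not part of this changeset: check the migration status,
 * // then start the upgrade only if needed. Assumes an instantiated `client`; the
 * // `migration_status` response field and its values come from the feature migration API.
 * const status = await client.migration.getFeatureUpgradeStatus()
 * if (status.migration_status === 'MIGRATION_NEEDED') {
 *   await client.migration.postFeatureUpgrade()
 * }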
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/feature-migration-api.html | Elasticsearch API documentation} */ - async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise - async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest | TB.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['migration.post_feature_upgrade'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 2974edb05..dd8a049df 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,989 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ml { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ml.clear_trained_model_deployment_cache': { + path: [ + 'model_id' + ], + body: [], + query: [] + }, + 'ml.close_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.delete_calendar': { + path: [ + 'calendar_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_event': { + path: [ + 'calendar_id', + 'event_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.delete_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_datafeed': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'force' + ] + }, + 'ml.delete_expired_data': { + path: [ + 'job_id' + ], + body: [ + 'requests_per_second', + 'timeout' + ], + query: [ + 'requests_per_second', + 'timeout' + ] + }, + 'ml.delete_filter': { + path: [ + 'filter_id' + ], + body: [], + query: [] + }, + 'ml.delete_forecast': { + path: [ + 'job_id', + 'forecast_id' + ], + body: [], + query: [ + 'allow_no_forecasts', + 'timeout' + ] + }, + 'ml.delete_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'force', + 'delete_user_annotations', + 'wait_for_completion' + ] + }, + 'ml.delete_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [] + }, + 'ml.delete_trained_model': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [] + }, + 'ml.estimate_model_memory': { + path: [], + body: [ + 'analysis_config', + 'max_bucket_cardinality', + 'overall_cardinality' + ], + query: [] + }, + 'ml.evaluate_data_frame': { + path: [], + body: [ + 'evaluation', + 'index', + 'query' + ], + query: [] + }, + 'ml.explain_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'source', + 'dest', + 'analysis', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'analyzed_fields', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.flush_job': { + path: [ + 'job_id' + ], + body: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ], + query: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ] + }, + 'ml.forecast': { + path: [ + 'job_id' + ], + body: [ + 'duration', + 'expires_in', + 'max_model_memory' + ], + query: [ + 'duration', + 'expires_in', + 'max_model_memory' + ] + }, + 'ml.get_buckets': { + path: [ + 'job_id', + 'timestamp' + ], + body: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'page', + 'sort', + 'start' + ], + query: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_calendar_events': { + path: [ + 'calendar_id' + ], + body: [], + query: [ + 'end', + 'from', + 'job_id', + 
'size', + 'start' + ] + }, + 'ml.get_calendars': { + path: [ + 'calendar_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_categories': { + path: [ + 'job_id', + 'category_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'partition_field_value', + 'size' + ] + }, + 'ml.get_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'ml.get_data_frame_analytics_stats': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'verbose' + ] + }, + 'ml.get_datafeed_stats': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_filters': { + path: [ + 'filter_id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_influencers': { + path: [ + 'job_id' + ], + body: [ + 'page' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_job_stats': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_memory_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ml.get_model_snapshot_upgrade_stats': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_model_snapshots': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'desc', + 'end', + 'page', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_overall_buckets': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ], + query: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + }, + 'ml.get_records': { + path: [ + 'job_id' + ], + body: [ + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'from', + 'record_score', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'include_model_definition', + 'size', + 'tags' + ] + }, + 'ml.get_trained_models_stats': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size' + ] + }, + 'ml.infer_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'docs', + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'ml.info': { + path: [], + body: [], + query: [] + }, + 'ml.open_job': { + path: [ + 'job_id' + ], + body: [ + 'timeout' + ], + query: [ + 'timeout' + ] + }, + 'ml.post_calendar_events': { + path: [ + 'calendar_id' + ], + body: [ + 'events' + ], + query: [] + }, + 'ml.post_data': { + path: [ + 'job_id' + ], + body: [ + 'data' + ], + query: [ + 'reset_end', + 'reset_start' + ] + }, + 'ml.preview_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'config' + ], + query: [] + }, + 'ml.preview_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'datafeed_config', + 'job_config' + ], + query: [ + 'start', + 'end' + ] + }, + 
'ml.put_calendar': { + path: [ + 'calendar_id' + ], + body: [ + 'job_ids', + 'description' + ], + query: [] + }, + 'ml.put_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.put_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ], + query: [] + }, + 'ml.put_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_filter': { + path: [ + 'filter_id' + ], + body: [ + 'description', + 'items' + ], + query: [] + }, + 'ml.put_job': { + path: [], + body: [ + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'job_id', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings' + ], + query: [ + 'defer_definition_decompression', + 'wait_for_completion' + ] + }, + 'ml.put_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [ + 'reassign' + ] + }, + 'ml.put_trained_model_definition_part': { + path: [ + 'model_id', + 'part' + ], + body: [ + 'definition', + 'total_definition_length', + 'total_parts' + ], + query: [] + }, + 'ml.put_trained_model_vocabulary': { + path: [ + 'model_id' + ], + body: [ + 'vocabulary', + 'merges', + 'scores' + ], + query: [] + }, + 'ml.reset_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'delete_user_annotations' + ] + }, + 'ml.revert_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'delete_intervening_results' + ], + query: [ + 'delete_intervening_results' + ] + }, + 'ml.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'ml.start_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ml.start_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'end', + 'start', + 'timeout' + ], + query: [ + 'end', + 'start', + 'timeout' + ] + }, + 'ml.start_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'adaptive_allocations' + ], + query: [ + 'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + }, + 'ml.stop_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + 
], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force' + ] + }, + 'ml.update_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.update_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.update_filter': { + path: [ + 'filter_id' + ], + body: [ + 'add_items', + 'description', + 'remove_items' + ], + query: [] + }, + 'ml.update_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ], + query: [] + }, + 'ml.update_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'description', + 'retain' + ], + query: [] + }, + 'ml.update_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'number_of_allocations', + 'adaptive_allocations' + ], + query: [ + 'number_of_allocations' + ] + }, + 'ml.upgrade_job_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'timeout' + ] + }, + 'ml.validate': { + path: [], + body: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ], + query: [] + }, + 'ml.validate_detector': { + path: [], + body: [ + 'detector' + ], + query: [] + } + } } /** * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. 
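 * @example
 * // A minimal usage sketch (assumes an instantiated `client`; the model id is illustrative):
 * await client.ml.clearTrainedModelDeploymentCache({ model_id: 'my_trained_model' })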
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clear-trained-model-deployment-cache.html | Elasticsearch API documentation} */ - async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise - async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest | TB.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.clear_trained_model_deployment_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,20 +1024,27 @@ export default class Ml { * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. 
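 * @example
 * // A minimal usage sketch (illustrative job id). Note that `force` and `timeout` appear in
 * // both the body and query lists above, so they can be passed as plain top-level keys:
 * const resp = await client.ml.closeJob({ job_id: 'total-requests', timeout: '30s' })
 * console.log(resp.closed)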
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-close-job.html | Elasticsearch API documentation} */ - async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise - async closeJob (this: That, params: T.MlCloseJobRequest | TB.MlCloseJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.close_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -103,9 +1054,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -124,18 +1081,31 @@ export default class Ml { * Delete a calendar. Remove all scheduled events from a calendar, then delete it. 
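 * @example
 * // A minimal usage sketch (illustrative calendar id; assumes an instantiated `client`):
 * const resp = await client.ml.deleteCalendar({ calendar_id: 'planned-outages' })
 * console.log(resp.acknowledged)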
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-calendar.html | Elasticsearch API documentation} */ - async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise - async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const querystring: Record = {} - const body = undefined + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -156,18 +1126,31 @@ export default class Ml { * Delete events from a calendar. 
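 * @example
 * // A minimal usage sketch (both ids are illustrative; event ids are returned by the get calendar events API):
 * await client.ml.deleteCalendarEvent({ calendar_id: 'planned-outages', event_id: 'LS8LJGEBMTCMA-qz49st' })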
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-calendar-event.html | Elasticsearch API documentation} */ - async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise - async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'event_id'] - const querystring: Record = {} - const body = undefined + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar_event'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -189,18 +1172,31 @@ export default class Ml { * Delete anomaly jobs from a calendar. 
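 * @example
 * // A minimal usage sketch (illustrative calendar and job ids):
 * await client.ml.deleteCalendarJob({ calendar_id: 'planned-outages', job_id: 'total-requests' })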
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-calendar-job.html | Elasticsearch API documentation} */ - async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise - async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'job_id'] - const querystring: Record = {} - const body = undefined + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -222,18 +1218,31 @@ export default class Ml { * Delete a data frame analytics job. 
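 * @example
 * // A minimal usage sketch (illustrative id; `force` and `timeout` are the query parameters this endpoint accepts):
 * await client.ml.deleteDataFrameAnalytics({ id: 'loganalytics', timeout: '1m' })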
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-dfanalytics.html | Elasticsearch API documentation} */ - async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -254,18 +1263,31 @@ export default class Ml { * Delete a datafeed. 
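 * @example
 * // A minimal usage sketch (illustrative datafeed id; `force` deletes the datafeed even while it is started):
 * await client.ml.deleteDatafeed({ datafeed_id: 'datafeed-total-requests', force: true })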
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-datafeed.html | Elasticsearch API documentation} */ - async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise - async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const querystring: Record = {} - const body = undefined + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise + async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -286,20 +1308,27 @@ export default class Ml { * Delete expired ML data. Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. 
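 * @example
 * // A minimal usage sketch: clean up expired results for all jobs, throttled and time-bounded.
 * // Both parameters are accepted in the body or the query string (see the lists above):
 * await client.ml.deleteExpiredData({ requests_per_second: 100, timeout: '1h' })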
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-expired-data.html | Elasticsearch API documentation} */ - async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise - async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['requests_per_second', 'timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise + async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.delete_expired_data'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -310,9 +1339,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -338,18 +1373,31 @@ export default class Ml { * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. 
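 * @example
 * // A minimal usage sketch (illustrative filter id; fails if a job still references the filter):
 * await client.ml.deleteFilter({ filter_id: 'safe_domains' })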
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-filter.html | Elasticsearch API documentation} */ - async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise - async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const querystring: Record = {} - const body = undefined + async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise + async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_filter'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -370,18 +1418,31 @@ export default class Ml { * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. 
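 * @example
 * // A minimal usage sketch (illustrative job id; `_all` removes every forecast for the job):
 * await client.ml.deleteForecast({ job_id: 'total-requests', forecast_id: '_all', allow_no_forecasts: true })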
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-forecast.html | Elasticsearch API documentation} */ - async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise - async deleteForecast (this: That, params: T.MlDeleteForecastRequest | TB.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'forecast_id'] - const querystring: Record = {} - const body = undefined + async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise + async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_forecast'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -410,18 +1471,31 @@ export default class Ml { * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. 
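 * @example
 * // A minimal usage sketch (illustrative job id). With `wait_for_completion: false` the
 * // call returns a task id to poll instead of blocking until the deletion finishes:
 * await client.ml.deleteJob({ job_id: 'total-requests', wait_for_completion: false })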
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-job.html | Elasticsearch API documentation} */ - async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise - async deleteJob (this: That, params: T.MlDeleteJobRequest | TB.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const querystring: Record = {} - const body = undefined + async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise + async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -442,18 +1516,31 @@ export default class Ml { * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. 
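 * @example
 * // A minimal usage sketch (illustrative ids; the snapshot must not be the active one):
 * await client.ml.deleteModelSnapshot({ job_id: 'farequote', snapshot_id: '1491948163' })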
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-delete-snapshot.html | Elasticsearch API documentation} */ - async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise - async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest | TB.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const querystring: Record = {} - const body = undefined + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise + async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_model_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -475,18 +1562,31 @@ export default class Ml { * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. 
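 * @example
 * // A minimal usage sketch (illustrative model id):
 * await client.ml.deleteTrainedModel({ model_id: 'regression-job-one-1574775307356' })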
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-trained-models.html | Elasticsearch API documentation} */ - async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise - async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise + async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -507,18 +1607,31 @@ export default class Ml { * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. 
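 * @example
 * // A minimal usage sketch (illustrative alias and model id; the alias must currently refer to this model):
 * await client.ml.deleteTrainedModelAlias({ model_alias: 'flight_delay_model', model_id: 'flight-delay-prediction-1574775307356' })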
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-trained-models-aliases.html | Elasticsearch API documentation} */ - async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise - async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] - const querystring: Record = {} - const body = undefined + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise + async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -540,20 +1653,27 @@ export default class Ml { * Estimate job model memory usage. Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references. 
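 * @example
 * // A minimal usage sketch: estimate memory for a hypothetical single-detector job.
 * // `overall_cardinality` supplies the cardinality estimate for the `by` field:
 * const resp = await client.ml.estimateModelMemory({
 *   analysis_config: {
 *     bucket_span: '5m',
 *     detectors: [{ function: 'sum', field_name: 'bytes', by_field_name: 'status' }]
 *   },
 *   overall_cardinality: { status: 10 }
 * })
 * console.log(resp.model_memory_estimate)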
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-estimate-model-memory.html | Elasticsearch API documentation} */ - async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise - async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise + async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.estimate_model_memory'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -564,9 +1684,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -582,20 +1708,27 @@ export default class Ml { * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. 
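 * @example
 * // A minimal usage sketch: evaluate outlier detection results (index and field names are
 * // illustrative; `actual_field` holds the ground truth, `predicted_probability_field` the result):
 * const resp = await client.ml.evaluateDataFrame({
 *   index: 'my_analytics_dest_index',
 *   evaluation: {
 *     outlier_detection: {
 *       actual_field: 'is_outlier',
 *       predicted_probability_field: 'ml.outlier_score'
 *     }
 *   }
 * })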
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/evaluate-dfanalytics.html | Elasticsearch API documentation} */ - async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> - async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise - async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['evaluation', 'index', 'query'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise + async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.evaluate_data_frame'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -605,9 +1738,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -623,20 +1762,27 @@ export default class Ml { * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. 
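 *
 * A minimal usage sketch for a config that has not been created yet (illustrative only; the `client` instance and index name are assumptions, not part of this diff):
 *
 *   const explanation = await client.ml.explainDataFrameAnalytics({
 *     source: { index: 'my-source-index' }, // hypothetical source index
 *     analysis: { outlier_detection: {} }
 *   })
 *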
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/explain-dfanalytics.html | Elasticsearch API documentation}
 */
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
-  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.explain_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     params = params ?? {}
@@ -647,9 +1793,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }

@@ -675,20 +1827,27 @@ export default class Ml {
 * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar; however, the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
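 *
 * A minimal usage sketch (illustrative only; the `client` instance and job ID are assumptions, not part of this diff):
 *
 *   // Ask the job to calculate interim results for the data buffered so far
 *   await client.ml.flushJob({ job_id: 'my-anomaly-job', calc_interim: true })
 *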
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-flush-job.html | Elasticsearch API documentation}
 */
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
-  async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.flush_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -698,9 +1857,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }

@@ -719,20 +1884,27 @@ export default class Ml {
 * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
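 *
 * A minimal usage sketch (illustrative only; the `client` instance, job ID, and durations are assumptions, not part of this diff):
 *
 *   // Forecast the next 3 days; keep the forecast results around for a week
 *   await client.ml.forecast({ job_id: 'my-anomaly-job', duration: '3d', expires_in: '7d' })
 *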
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-forecast.html | Elasticsearch API documentation} */ - async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> - async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise - async forecast (this: That, params: T.MlForecastRequest | TB.MlForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> + async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise + async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.forecast'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -742,9 +1914,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -763,20 +1941,27 @@ export default class Ml { * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. 
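 *
 * A minimal usage sketch (illustrative only; the `client` instance, job ID, and threshold are assumptions, not part of this diff):
 *
 *   const buckets = await client.ml.getBuckets({
 *     job_id: 'my-anomaly-job',
 *     anomaly_score: 75, // only return buckets scoring at least 75
 *     start: 'now-2d'
 *   })
 *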
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-bucket.html | Elasticsearch API documentation} */ - async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise - async getBuckets (this: That, params: T.MlGetBucketsRequest | TB.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise + async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_buckets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -786,9 +1971,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -815,18 +2006,31 @@ export default class Ml { * Get info about events in calendars. 
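 *
 * A minimal usage sketch (illustrative only; the `client` instance and calendar ID are assumptions, not part of this diff):
 *
 *   const events = await client.ml.getCalendarEvents({ calendar_id: 'planned-outages' })
 *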
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-calendar-event.html | Elasticsearch API documentation} */ - async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise - async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const querystring: Record = {} - const body = undefined + async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise + async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_calendar_events'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -847,20 +2051,27 @@ export default class Ml { * Get calendar configuration info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-calendar.html | Elasticsearch API documentation} */ - async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise - async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['page'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise + async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_calendars'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -871,9 +2082,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -899,20 +2116,27 @@ export default class Ml { * Get anomaly detection job results for categories. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-category.html | Elasticsearch API documentation} */ - async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise - async getCategories (this: That, params: T.MlGetCategoriesRequest | TB.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'category_id'] - const acceptedBody: string[] = ['page'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise + async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_categories'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -922,9 +2146,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -951,19 +2181,32 @@ export default class Ml { * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-dfanalytics.html | Elasticsearch API documentation} */ - async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -991,19 +2234,32 @@ export default class Ml { * Get data frame analytics job stats. 
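 *
 * A minimal usage sketch (illustrative only; the `client` instance and job ID are assumptions, not part of this diff):
 *
 *   const stats = await client.ml.getDataFrameAnalyticsStats({ id: 'my-dfa-job' })
 *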
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-dfanalytics-stats.html | Elasticsearch API documentation}
 */
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
-  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.get_data_frame_analytics_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1031,19 +2287,32 @@ export default class Ml {
 * Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
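 *
 * A minimal usage sketch (illustrative only; the `client` instance and datafeed ID are assumptions, not part of this diff):
 *
 *   // Stats for one datafeed; omit `datafeed_id` (or pass `_all`) to cover every datafeed
 *   const stats = await client.ml.getDatafeedStats({ datafeed_id: 'datafeed-my-job' })
 *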
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-datafeed-stats.html | Elasticsearch API documentation}
 */
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
-  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.get_datafeed_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1071,19 +2340,32 @@ export default class Ml {
 * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
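 *
 * A minimal usage sketch (illustrative only; the `client` instance and wildcard pattern are assumptions, not part of this diff):
 *
 *   const datafeeds = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-*' })
 *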
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-datafeed.html | Elasticsearch API documentation} */ - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise - async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const querystring: Record = {} - const body = undefined + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise + async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeeds'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1111,19 +2393,32 @@ export default class Ml { * Get filters. You can get a single filter or all filters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-filter.html | Elasticsearch API documentation} */ - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise - async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const querystring: Record = {} - const body = undefined + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise + async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_filters'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1151,20 +2446,27 @@ export default class Ml { * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-influencer.html | Elasticsearch API documentation} */ - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise - async getInfluencers (this: That, params: T.MlGetInfluencersRequest | TB.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['page'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise + async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_influencers'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1174,9 +2476,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1195,19 +2503,32 @@ export default class Ml { * Get anomaly detection job stats. 
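 *
 * A minimal usage sketch (illustrative only; the `client` instance is an assumption, not part of this diff):
 *
 *   // Omitting `job_id` returns stats for all anomaly detection jobs
 *   const stats = await client.ml.getJobStats()
 *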
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-job-stats.html | Elasticsearch API documentation}
 */
-  async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
-  async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobStatsResponse, unknown>>
-  async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobStatsResponse>
-  async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobStatsResponse, unknown>>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobStatsResponse>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.get_job_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1235,19 +2556,32 @@ export default class Ml {
 * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
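 *
 * A minimal usage sketch (illustrative only; the `client` instance, group name, and the `allow_no_match` flag are assumptions, not part of this diff):
 *
 *   const jobs = await client.ml.getJobs({ job_id: 'my-job-group', allow_no_match: true })
 *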
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-job.html | Elasticsearch API documentation} */ - async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise - async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const querystring: Record = {} - const body = undefined + async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise + async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_jobs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1275,19 +2609,32 @@ export default class Ml { * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-ml-memory.html | Elasticsearch API documentation} */ - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise - async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest | TB.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise + async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_memory_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1315,18 +2662,31 @@ export default class Ml { * Get anomaly detection job model snapshot upgrade usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-job-model-snapshot-upgrade-stats.html | Elasticsearch API documentation} */ - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise - async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest | TB.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const querystring: Record = {} - const body = undefined + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1348,20 +2708,27 @@ export default class Ml { * Get model snapshots info. 
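 *
 * A minimal usage sketch (illustrative only; the `client` instance and job ID are assumptions, not part of this diff):
 *
 *   const snapshots = await client.ml.getModelSnapshots({
 *     job_id: 'my-anomaly-job',
 *     sort: 'timestamp',
 *     desc: true // newest snapshot first
 *   })
 *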
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-snapshot.html | Elasticsearch API documentation}
 */
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
-  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest | TB.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'snapshot_id']
-    const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetModelSnapshotsResponse>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetModelSnapshotsResponse, unknown>>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<T.MlGetModelSnapshotsResponse>
+  async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.get_model_snapshots']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }

     for (const key in params) {
@@ -1371,9 +2738,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }

@@ -1400,20 +2773,27 @@ export default class Ml {
 * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket.
If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-overall-buckets.html | Elasticsearch API documentation} */ - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise - async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_overall_buckets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1423,9 +2803,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1444,20 +2830,27 @@ export default class Ml { * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. 
The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-get-record.html | Elasticsearch API documentation} */ - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise - async getRecords (this: That, params: T.MlGetRecordsRequest | TB.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_records'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1467,9 +2860,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1488,19 +2887,32 @@ export default class Ml { * Get trained model configuration info. 
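 *
 * A minimal usage sketch (illustrative only; the `client` instance and model ID are assumptions, not part of this diff):
 *
 *   const models = await client.ml.getTrainedModels({ model_id: 'my-trained-model' })
 *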
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-trained-models.html | Elasticsearch API documentation} */ - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise - async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1528,19 +2940,32 @@ export default class Ml { * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. 
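 *
 * A minimal usage sketch (illustrative only; the `client` instance and wildcard pattern are assumptions, not part of this diff):
 *
 *   const stats = await client.ml.getTrainedModelsStats({ model_id: 'my-model-*' })
 *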
@@ -1528,19 +2940,32 @@ export default class Ml {
   * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-trained-models-stats.html | Elasticsearch API documentation}
   */
-  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsStatsResponse>
-  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsStatsResponse, unknown>>
-  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetTrainedModelsStatsResponse>
-  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetTrainedModelsStatsResponse>
+  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetTrainedModelsStatsResponse, unknown>>
+  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetTrainedModelsStatsResponse>
+  async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.get_trained_models_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1568,20 +2993,27 @@ export default class Ml {
   * Evaluate a trained model.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/infer-trained-model.html | Elasticsearch API documentation}
   */
-  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelResponse>
-  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInferTrainedModelResponse, unknown>>
-  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlInferTrainedModelResponse>
-  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const acceptedBody: string[] = ['docs', 'inference_config']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInferTrainedModelResponse>
+  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInferTrainedModelResponse, unknown>>
+  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlInferTrainedModelResponse>
+  async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.infer_trained_model']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -1591,9 +3023,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
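// --- editor's example (not part of the generated diff): inferTrainedModel.
// Body fields (`docs`, `inference_config`) sit at the top level of the request;
// the loop above routes acceptedQuery/commonQueryParams keys to the querystring
// and everything else to the body. Model ID and document shape are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const inference = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'Elasticsearch is a distributed search engine' }]
})
console.log(inference.inference_results)
// ---------------------------------------------------------------------------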
@@ -1612,19 +3050,32 @@ export default class Ml {
   * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-ml-info.html | Elasticsearch API documentation}
   */
-  async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInfoResponse>
-  async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInfoResponse, unknown>>
-  async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise<T.MlInfoResponse>
-  async info (this: That, params?: T.MlInfoRequest | TB.MlInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlInfoResponse>
+  async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlInfoResponse, unknown>>
+  async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise<T.MlInfoResponse>
+  async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.info']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1642,20 +3093,27 @@ export default class Ml {
   * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-open-job.html | Elasticsearch API documentation}
   */
-  async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlOpenJobResponse>
-  async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlOpenJobResponse, unknown>>
-  async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise<T.MlOpenJobResponse>
-  async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['timeout']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlOpenJobResponse>
+  async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlOpenJobResponse, unknown>>
+  async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise<T.MlOpenJobResponse>
+  async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.open_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -1665,9 +3123,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
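// --- editor's example (not part of the generated diff): openJob. `timeout`
// is listed in acceptedBody for 'ml.open_job', so it is serialized into the
// request body. The job ID is an illustrative assumption.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const opened = await client.ml.openJob({ job_id: 'my-anomaly-job', timeout: '30m' })
console.log(opened.opened)
// ---------------------------------------------------------------------------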
@@ -1686,20 +3150,27 @@ export default class Ml {
   * Add scheduled events to the calendar.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-post-calendar-event.html | Elasticsearch API documentation}
   */
-  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostCalendarEventsResponse>
-  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostCalendarEventsResponse, unknown>>
-  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlPostCalendarEventsResponse>
-  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id']
-    const acceptedBody: string[] = ['events']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostCalendarEventsResponse>
+  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostCalendarEventsResponse, unknown>>
+  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlPostCalendarEventsResponse>
+  async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.post_calendar_events']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -1709,9 +3180,15 @@ export default class Ml {
         body[key] = params[key]
      } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
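// --- editor's example (not part of the generated diff): postCalendarEvents.
// `events` is the only acceptedBody key; calendar ID, descriptions, and
// timestamps below are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.postCalendarEvents({
  calendar_id: 'planned-outages',
  events: [
    { description: 'Scheduled maintenance', start_time: '2025-01-01T00:00:00Z', end_time: '2025-01-01T04:00:00Z' }
  ]
})
// ---------------------------------------------------------------------------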
@@ -1730,25 +3207,35 @@ export default class Ml {
   * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-post-data.html | Elasticsearch API documentation}
   */
-  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData> | TB.MlPostDataRequest<TData>, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostDataResponse>
-  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData> | TB.MlPostDataRequest<TData>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostDataResponse, unknown>>
-  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData> | TB.MlPostDataRequest<TData>, options?: TransportRequestOptions): Promise<T.MlPostDataResponse>
-  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData> | TB.MlPostDataRequest<TData>, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const acceptedBody: string[] = ['data']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
+  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData>, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPostDataResponse>
+  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPostDataResponse, unknown>>
+  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData>, options?: TransportRequestOptions): Promise<T.MlPostDataResponse>
+  async postData<TData = unknown> (this: That, params: T.MlPostDataRequest<TData>, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.post_data']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+    let body: any = params.body ?? undefined

     for (const key in params) {
       if (acceptedBody.includes(key)) {
         // @ts-expect-error
         body = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -1767,20 +3254,27 @@ export default class Ml {
   * Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/preview-dfanalytics.html | Elasticsearch API documentation}
   */
-  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDataFrameAnalyticsResponse>
-  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDataFrameAnalyticsResponse, unknown>>
-  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDataFrameAnalyticsResponse>
-  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['config']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDataFrameAnalyticsResponse>
+  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDataFrameAnalyticsResponse, unknown>>
+  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDataFrameAnalyticsResponse>
+  async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.preview_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     params = params ?? {}
@@ -1791,9 +3285,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
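// --- editor's example (not part of the generated diff): previewing the
// features an existing data frame analytics job would extract. The job ID is
// an illustrative assumption.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const preview = await client.ml.previewDataFrameAnalytics({ id: 'my-dfa-job' })
console.log(preview.feature_values)
// ---------------------------------------------------------------------------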
@@ -1819,20 +3319,27 @@ export default class Ml {
   * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-preview-datafeed.html | Elasticsearch API documentation}
   */
-  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDatafeedResponse<TResult>>
-  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDatafeedResponse<TResult>, unknown>>
-  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDatafeedResponse<TResult>>
-  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['datafeed_config', 'job_config']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPreviewDatafeedResponse<TResult>>
+  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPreviewDatafeedResponse<TResult>, unknown>>
+  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPreviewDatafeedResponse<TResult>>
+  async previewDatafeed<TResult = unknown> (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.preview_datafeed']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }

     params = params ?? {}
@@ -1843,9 +3350,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
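// --- editor's example (not part of the generated diff): previewDatafeed.
// The restored TResult type parameter types the preview documents returned by
// the API. Datafeed ID and document shape are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const preview = await client.ml.previewDatafeed<Record<string, unknown>>({
  datafeed_id: 'datafeed-my-anomaly-job'
})
console.log(preview)
// ---------------------------------------------------------------------------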
@@ -1871,20 +3384,27 @@ export default class Ml {
   * Create a calendar.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-put-calendar.html | Elasticsearch API documentation}
   */
-  async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarResponse>
-  async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarResponse, unknown>>
-  async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarResponse>
-  async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id']
-    const acceptedBody: string[] = ['job_ids', 'description']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarResponse>
+  async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarResponse, unknown>>
+  async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarResponse>
+  async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_calendar']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -1894,9 +3414,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -1915,18 +3441,31 @@ export default class Ml {
   * Add anomaly detection job to calendar.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-put-calendar-job.html | Elasticsearch API documentation}
   */
-  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarJobResponse>
-  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarJobResponse, unknown>>
-  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarJobResponse>
-  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['calendar_id', 'job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutCalendarJobResponse>
+  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutCalendarJobResponse, unknown>>
+  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<T.MlPutCalendarJobResponse>
+  async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.put_calendar_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
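// --- editor's example (not part of the generated diff): creating a calendar
// and attaching a job to it. `calendar_id` and `job_id` are path params; the
// IDs are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putCalendar({ calendar_id: 'planned-outages', description: 'Known maintenance windows' })
await client.ml.putCalendarJob({ calendar_id: 'planned-outages', job_id: 'my-anomaly-job' })
// ---------------------------------------------------------------------------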
@@ -1948,20 +3487,27 @@ export default class Ml {
   * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-dfanalytics.html | Elasticsearch API documentation}
   */
-  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDataFrameAnalyticsResponse>
-  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDataFrameAnalyticsResponse, unknown>>
-  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPutDataFrameAnalyticsResponse>
-  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDataFrameAnalyticsResponse>
+  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDataFrameAnalyticsResponse, unknown>>
+  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlPutDataFrameAnalyticsResponse>
+  async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -1971,9 +3517,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -1992,20 +3544,27 @@ export default class Ml {
   * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-put-datafeed.html | Elasticsearch API documentation}
   */
-  async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
-  async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDatafeedResponse, unknown>>
-  async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPutDatafeedResponse>
-  async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['datafeed_id']
-    const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutDatafeedResponse>
+  async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutDatafeedResponse, unknown>>
+  async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlPutDatafeedResponse>
+  async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_datafeed']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2015,9 +3574,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
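// --- editor's example (not part of the generated diff): putDatafeed. Body
// fields such as `indices`, `job_id`, and `query` sit at the top level of the
// request object; IDs, index pattern, and query are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job',
  job_id: 'my-anomaly-job',
  indices: ['web-logs-*'],
  query: { match_all: { boost: 1 } }
})
// ---------------------------------------------------------------------------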
@@ -2036,20 +3601,27 @@ export default class Ml {
   * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-put-filter.html | Elasticsearch API documentation}
   */
-  async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutFilterResponse>
-  async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutFilterResponse, unknown>>
-  async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise<T.MlPutFilterResponse>
-  async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['filter_id']
-    const acceptedBody: string[] = ['description', 'items']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutFilterResponse>
+  async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutFilterResponse, unknown>>
+  async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise<T.MlPutFilterResponse>
+  async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_filter']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2059,9 +3631,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
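// --- editor's example (not part of the generated diff): putFilter. The
// filter ID, description, and items are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putFilter({
  filter_id: 'safe-domains',
  description: 'Domains excluded from anomaly results',
  items: ['internal.example.com', 'staging.example.com']
})
// ---------------------------------------------------------------------------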
@@ -2080,20 +3658,27 @@ export default class Ml {
   * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-put-job.html | Elasticsearch API documentation}
   */
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
-  async putJob (this: That, params: T.MlPutJobRequest | TB.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutJobResponse>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutJobResponse, unknown>>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise<T.MlPutJobResponse>
+  async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2103,9 +3688,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
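// --- editor's example (not part of the generated diff): putJob with a minimal
// anomaly detection config. Job ID, bucket span, and field names are
// illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putJob({
  job_id: 'my-anomaly-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime' }]
  },
  data_description: { time_field: '@timestamp' }
})
// ---------------------------------------------------------------------------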
@@ -2124,20 +3715,27 @@ export default class Ml {
   * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-trained-models.html | Elasticsearch API documentation}
   */
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelResponse>
-  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelResponse>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelResponse, unknown>>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelResponse>
+  async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_trained_model']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2147,9 +3745,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -2168,18 +3772,31 @@ export default class Ml {
   * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-trained-models-aliases.html | Elasticsearch API documentation}
   */
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelAliasResponse>
-  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_alias', 'model_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelAliasResponse>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelAliasResponse, unknown>>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelAliasResponse>
+  async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.put_trained_model_alias']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
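// --- editor's example (not part of the generated diff): pointing an alias at
// a newer model version. `model_alias` and `model_id` are path params; the
// IDs and the `reassign` flag shown here are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putTrainedModelAlias({
  model_id: 'my-model-v2',
  model_alias: 'my-model',
  reassign: true
})
// ---------------------------------------------------------------------------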
@@ -2201,20 +3818,27 @@ export default class Ml {
   * Create part of a trained model definition.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-trained-model-definition-part.html | Elasticsearch API documentation}
   */
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelDefinitionPartResponse, unknown>>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelDefinitionPartResponse>
-  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id', 'part']
-    const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelDefinitionPartResponse>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelDefinitionPartResponse, unknown>>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelDefinitionPartResponse>
+  async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_trained_model_definition_part']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2224,9 +3848,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
@@ -2246,20 +3876,27 @@ export default class Ml {
   * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-trained-model-vocabulary.html | Elasticsearch API documentation}
   */
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelVocabularyResponse, unknown>>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelVocabularyResponse>
-  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['model_id']
-    const acceptedBody: string[] = ['vocabulary', 'merges', 'scores']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlPutTrainedModelVocabularyResponse>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlPutTrainedModelVocabularyResponse, unknown>>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<T.MlPutTrainedModelVocabularyResponse>
+  async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.put_trained_model_vocabulary']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -2269,9 +3906,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
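// --- editor's example (not part of the generated diff): supplying a
// vocabulary for an NLP model. Model ID and tokens are illustrative
// assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
await client.ml.putTrainedModelVocabulary({
  model_id: 'my-nlp-model',
  vocabulary: ['[PAD]', '[UNK]', 'elastic', 'search']
})
// ---------------------------------------------------------------------------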
@@ -2290,18 +3933,31 @@ export default class Ml {
   * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-reset-job.html | Elasticsearch API documentation}
   */
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<T.MlResetJobResponse>
-  async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlResetJobResponse>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlResetJobResponse, unknown>>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise<T.MlResetJobResponse>
+  async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.reset_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2322,20 +3978,27 @@ export default class Ml {
   * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-revert-snapshot.html | Elasticsearch API documentation}
   */
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlRevertModelSnapshotResponse>
-  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest | TB.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['job_id', 'snapshot_id']
-    const acceptedBody: string[] = ['delete_intervening_results']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlRevertModelSnapshotResponse>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlRevertModelSnapshotResponse, unknown>>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlRevertModelSnapshotResponse>
+  async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['ml.revert_model_snapshot']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }

     for (const key in params) {
@@ -2345,9 +4008,15 @@ export default class Ml {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }
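// --- editor's example (not part of the generated diff): reverting to a model
// snapshot. `delete_intervening_results` is listed in acceptedBody; job and
// snapshot IDs are illustrative assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const reverted = await client.ml.revertModelSnapshot({
  job_id: 'my-anomaly-job',
  snapshot_id: '1575402236',
  delete_intervening_results: true
})
console.log(reverted.model)
// ---------------------------------------------------------------------------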
@@ -2367,19 +4036,32 @@ export default class Ml {
   * Set upgrade_mode for ML indices. Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-set-upgrade-mode.html | Elasticsearch API documentation}
   */
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
-  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest | TB.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlSetUpgradeModeResponse>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlSetUpgradeModeResponse, unknown>>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<T.MlSetUpgradeModeResponse>
+  async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.set_upgrade_mode']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -2397,18 +4079,31 @@ export default class Ml {
   * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/start-dfanalytics.html | Elasticsearch API documentation}
   */
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
-  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlStartDataFrameAnalyticsResponse, unknown>>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlStartDataFrameAnalyticsResponse>
+  async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['ml.start_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-start-datafeed.html | Elasticsearch API documentation} */ - async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise - async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['end', 'start', 'timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2452,9 +4154,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2473,20 +4181,27 @@ export default class Ml { * Start a trained model deployment. It allocates the model to every machine learning node. 
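* @example
* A sketch that blocks until the deployment has started, assuming a configured `client`; the model id is illustrative:
* ```ts
* const resp = await client.ml.startTrainedModelDeployment({
*   model_id: 'my-model',
*   wait_for: 'started'
* })
* console.log(resp.assignment.assignment_state)
* ```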
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/start-trained-model-deployment.html | Elasticsearch API documentation} */ - async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> - async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise - async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['adaptive_allocations'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2496,9 +4211,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2517,18 +4238,31 @@ export default class Ml { * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. 
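* @example
* A sketch that also exercises the explicit `querystring` passthrough this change introduces (the object is merged into the final query string); the id and timeout are illustrative:
* ```ts
* const resp = await client.ml.stopDataFrameAnalytics({
*   id: 'my-dfa-job',
*   querystring: { timeout: '20s' }
* })
* console.log(resp.stopped)
* ```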
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/stop-dfanalytics.html | Elasticsearch API documentation} */ - async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2549,20 +4283,27 @@ export default class Ml { * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-stop-datafeed.html | Elasticsearch API documentation} */ - async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise - async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.stop_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2572,9 +4313,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2593,18 +4340,31 @@ export default class Ml { * Stop a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/stop-trained-model-deployment.html | Elasticsearch API documentation} */ - async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise - async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const querystring: Record = {} - const body = undefined + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2625,20 +4385,27 @@ export default class Ml { * Update a data frame analytics job. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} */ - async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise - async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2648,9 +4415,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2669,20 +4442,27 @@ export default class Ml { * Update a datafeed. You must stop and start the datafeed for the changes to be applied. 
When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-update-datafeed.html | Elasticsearch API documentation} */ - async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise - async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2692,9 +4472,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2713,20 +4499,27 @@ export default class Ml { * Update a filter. Updates the description of a filter, adds items, or removes items from the list. 
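* @example
* A sketch of adding and removing filter items in one call, assuming a configured `client`; the filter id and items are illustrative:
* ```ts
* const resp = await client.ml.updateFilter({
*   filter_id: 'safe_domains',
*   add_items: ['*.elastic.co'],
*   remove_items: ['old-domain.example'],
*   description: 'Allowed domains'
* })
* console.log(resp.items)
* ```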
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-update-filter.html | Elasticsearch API documentation} */ - async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise - async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_filter'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2736,9 +4529,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2757,20 +4556,27 @@ export default class Ml { * Update an anomaly detection job. Updates certain properties of an anomaly detection job. 
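* @example
* A sketch of updating a job's description and retention, assuming a configured `client`; the job id and values are illustrative:
* ```ts
* const resp = await client.ml.updateJob({
*   job_id: 'my-anomaly-job',
*   description: 'Production traffic anomaly detection',
*   results_retention_days: 30
* })
* console.log(resp.job_id)
* ```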
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-update-job.html | Elasticsearch API documentation} */ - async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise - async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2780,9 +4586,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2801,20 +4613,27 @@ export default class Ml { * Update a snapshot. Updates certain properties of a snapshot. 
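* @example
* A sketch of retaining a snapshot beyond its retention period, assuming a configured `client`; the ids are illustrative:
* ```ts
* const resp = await client.ml.updateModelSnapshot({
*   job_id: 'my-anomaly-job',
*   snapshot_id: '1575402236',
*   retain: true
* })
* console.log(resp.acknowledged)
* ```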
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-update-snapshot.html | Elasticsearch API documentation} */ - async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise - async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest | TB.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['description', 'retain'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_model_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2824,9 +4643,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2846,20 +4671,27 @@ export default class Ml { * Update a trained model deployment. 
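* @example
* A sketch of scaling a deployment, assuming a configured `client`; the model id and allocation count are illustrative:
* ```ts
* const resp = await client.ml.updateTrainedModelDeployment({
*   model_id: 'my-model',
*   number_of_allocations: 2
* })
* console.log(resp.assignment.assignment_state)
* ```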
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation} */ - async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise - async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2869,9 +4701,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2890,18 +4728,31 @@ export default class Ml { * Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. 
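* @example
* A sketch that upgrades a snapshot and waits for completion, assuming a configured `client`; the ids are illustrative:
* ```ts
* const resp = await client.ml.upgradeJobSnapshot({
*   job_id: 'my-anomaly-job',
*   snapshot_id: '1575402236',
*   wait_for_completion: true
* })
* console.log(resp.completed)
* ```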
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} */ - async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> - async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise - async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const querystring: Record = {} - const body = undefined + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ml.upgrade_job_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2923,20 +4774,27 @@ export default class Ml { * Validate an anomaly detection job. * @see {@link https://www.elastic.co/guide/en/machine-learning/8.19/ml-jobs.html | Elasticsearch API documentation} */ - async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise - async validate (this: That, params?: T.MlValidateRequest | TB.MlValidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2947,9 +4805,15 @@ export default class Ml { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2965,25 +4829,35 @@ export default class Ml { * Validate an anomaly detection job. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8 | Elasticsearch API documentation} */ - async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> - async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise - async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['detector'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate_detector'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 8a10dcea9..c6b50e311 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,38 +21,69 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Monitoring { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'monitoring.bulk': { + path: [ + 'type' + ], + body: [ + 'operations' + ], + query: [ + 'system_id', + 'system_api_version', + 'interval' + ] + } + } } /** * Send monitoring data. This API is used by the monitoring features to send monitoring data. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8 | Elasticsearch API documentation} */ - async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> - async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise - async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['type'] - const acceptedBody: string[] = ['operations'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['monitoring.bulk'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 48a5daf02..a644b64d8 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,73 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch: { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'index', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'rest_total_hits_as_int', + 'routing', + 'search_type', + 'typed_keys' + ] + } +} /** * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. 
Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-multi-search.html | Elasticsearch API documentation} */ -export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise> -export default async function MsearchApi> (this: That, params: T.MsearchRequest | TB.MsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index af6aaff87..725eee13c 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,64 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch_template: { + path: [ + 'index' + ], + body: [ + 'search_templates' + ], + query: [ + 'ccs_minimize_roundtrips', + 'max_concurrent_searches', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/multi-search-template.html | Elasticsearch API documentation} */ -export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> -export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['search_templates'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index f9dc82c84..8bd594e98 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,64 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mtermvectors: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'ids', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. 
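* @example
* A sketch mixing an existing document with an artificial one, assuming a configured `client`; the index, id, and document content are illustrative:
* ```ts
* const resp = await client.mtermvectors({
*   index: 'my-index',
*   docs: [
*     { _id: '1' },                               // existing document
*     { doc: { message: 'artificial document' } } // artificial document
*   ],
*   term_statistics: true
* })
* console.log(resp.docs.length)
* ```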
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-multi-termvectors.html | Elasticsearch API documentation} */ -export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise -export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mtermvectors + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -66,9 +89,15 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 9c8cbd3a4..ae59e8eb2 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,133 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Nodes { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'nodes.clear_repositories_metering_archive': { + path: [ + 'node_id', + 'max_archive_version' + ], + body: [], + query: [] + }, + 'nodes.get_repositories_metering_info': { + path: [ + 'node_id' + ], + body: [], + query: [] + }, + 'nodes.hot_threads': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'ignore_idle_threads', + 'interval', + 'snapshots', + 'threads', + 'timeout', + 'type', + 'sort' + ] + }, + 'nodes.info': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'flat_settings', + 'timeout' + ] + }, + 'nodes.reload_secure_settings': { + path: [ + 'node_id' + ], + body: [ + 'secure_settings_password' + ], + query: [ + 'timeout' + ] + }, + 'nodes.stats': { + path: [ + 'node_id', + 'metric', + 'index_metric' + ], + body: [], + query: [ + 'completion_fields', + 'fielddata_fields', + 'fields', + 'groups', + 'include_segment_file_sizes', + 'level', + 'timeout', + 'types', + 'include_unloaded_segments' + ] + }, + 'nodes.usage': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'timeout' + ] + } + } } /** * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. 
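* @example
* A sketch, assuming a configured `client`; the node id and archive version are illustrative:
* ```ts
* const resp = await client.nodes.clearRepositoriesMeteringArchive({
*   node_id: 'node-1',
*   max_archive_version: 10
* })
* console.log(resp.nodes)
* ```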
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clear-repositories-metering-archive-api.html | Elasticsearch API documentation}
    */
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
-  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest | TB.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'max_archive_version']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesClearRepositoriesMeteringArchiveResponse, unknown>>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<T.NodesClearRepositoriesMeteringArchiveResponse>
+  async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.clear_repositories_metering_archive']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -81,18 +169,31 @@ export default class Nodes {
    * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
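Editor's aside: the companion read API can be sketched the same way; ids are placeholders. As the description above notes, the counters are volatile, so a caller that needs history must persist them itself.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

const info = await client.nodes.getRepositoriesMeteringInfo({ node_id: 'node-1' })
// The information is lost on node restart, so store what you need durably.
console.log(JSON.stringify(info, null, 2))
```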
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-repositories-metering-api.html | Elasticsearch API documentation}
    */
-  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
-  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesGetRepositoriesMeteringInfoResponse, unknown>>
-  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
-  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest | TB.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesGetRepositoriesMeteringInfoResponse, unknown>>
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<T.NodesGetRepositoriesMeteringInfoResponse>
+  async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.get_repositories_metering_info']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -113,19 +214,32 @@ export default class Nodes {
    * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
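Editor's aside: a minimal sketch of calling hot threads, assuming a local cluster; `threads` and `interval` are query parameters listed in the acceptedParams entry above.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Ask for the top 3 hot threads per node, sampled over 500ms.
const report = await client.nodes.hotThreads({ threads: 3, interval: '500ms' })
console.log(report) // the hot threads API responds with plain text, not JSON
```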
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-nodes-hot-threads.html | Elasticsearch API documentation}
    */
-  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesHotThreadsResponse>
-  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesHotThreadsResponse, unknown>>
-  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<T.NodesHotThreadsResponse>
-  async hotThreads (this: That, params?: T.NodesHotThreadsRequest | TB.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesHotThreadsResponse>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesHotThreadsResponse, unknown>>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<T.NodesHotThreadsResponse>
+  async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.hot_threads']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -153,19 +267,32 @@ export default class Nodes {
    * Get node information. By default, the API returns all attributes and core settings for cluster nodes.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-nodes-info.html | Elasticsearch API documentation}
    */
-  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesInfoResponse>
-  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesInfoResponse, unknown>>
-  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise<T.NodesInfoResponse>
-  async info (this: That, params?: T.NodesInfoRequest | TB.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesInfoResponse>
+  async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesInfoResponse, unknown>>
+  async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<T.NodesInfoResponse>
+  async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.info']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -200,20 +327,27 @@ export default class Nodes {
    * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-nodes-reload-secure-settings.html | Elasticsearch API documentation}
    */
-  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesReloadSecureSettingsResponse>
-  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesReloadSecureSettingsResponse, unknown>>
-  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
-  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id']
-    const acceptedBody: string[] = ['secure_settings_password']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesReloadSecureSettingsResponse>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesReloadSecureSettingsResponse, unknown>>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<T.NodesReloadSecureSettingsResponse>
+  async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['nodes.reload_secure_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     params = params ?? {}
@@ -224,9 +358,15 @@ export default class Nodes {
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
@@ -252,19 +392,32 @@ export default class Nodes {
    * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-nodes-stats.html | Elasticsearch API documentation}
    */
-  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesStatsResponse>
-  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesStatsResponse, unknown>>
-  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
-  async stats (this: That, params?: T.NodesStatsRequest | TB.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric', 'index_metric']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesStatsResponse>
+  async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesStatsResponse, unknown>>
+  async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<T.NodesStatsResponse>
+  async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -306,19 +459,32 @@ export default class Nodes {
    * Get feature usage information.
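Editor's aside: a minimal sketch covering the two read-only methods above (stats and usage), assuming a local cluster; the endpoint is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Limit node stats to the JVM metric; `metric` is a path parameter.
const stats = await client.nodes.stats({ metric: 'jvm' })

// Feature usage for every node; no parameters are required.
const usage = await client.nodes.usage()
console.log(Object.keys(stats.nodes), Object.keys(usage.nodes))
```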
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster-nodes-usage.html | Elasticsearch API documentation}
    */
-  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesUsageResponse>
-  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesUsageResponse, unknown>>
-  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
-  async usage (this: That, params?: T.NodesUsageRequest | TB.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['node_id', 'metric']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.NodesUsageResponse>
+  async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.NodesUsageResponse, unknown>>
+  async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<T.NodesUsageResponse>
+  async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['nodes.usage']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts
index 58af47c52..978a61869 100644
--- a/src/api/api/open_point_in_time.ts
+++ b/src/api/api/open_point_in_time.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */
 
 /* eslint-disable import/export */
@@ -35,27 +21,58 @@ import {
   TransportResult
 } from '@elastic/transport'
 
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  open_point_in_time: {
+    path: [
+      'index'
+    ],
+    body: [
+      'index_filter'
+    ],
+    query: [
+      'keep_alive',
+      'ignore_unavailable',
+      'preference',
+      'routing',
+      'expand_wildcards',
+      'allow_partial_search_results',
+      'max_concurrent_shard_requests'
+    ]
+  }
+}
 
 /**
   * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
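Editor's aside: the open/search/close lifecycle described above, as a minimal sketch against a hypothetical index; endpoint and index name are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// keep_alive only needs to outlive the *next* request, not the whole pagination.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

// Search against the PIT; no index is passed here because it is implied by the PIT.
const page = await client.search({
  size: 100,
  pit: { id: pit.id, keep_alive: '1m' },
  sort: ['_shard_doc']
})

// Release the search contexts once done so old segments can be merged away.
await client.closePointInTime({ id: pit.id })
```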
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/point-in-time-api.html | Elasticsearch API documentation}
  */
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.OpenPointInTimeResponse>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
-export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest | TB.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['index']
-  const acceptedBody: string[] = ['index_filter']
-  const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.OpenPointInTimeResponse, unknown>>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<T.OpenPointInTimeResponse>
+export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise<any> {
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.open_point_in_time
+
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
   }
 
   for (const key in params) {
@@ -65,9 +82,15 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts
index 8d734db34..2b5aa2786 100644
--- a/src/api/api/ping.ts
+++ b/src/api/api/ping.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */
 
 /* eslint-disable import/export */
@@ -35,26 +21,49 @@ import {
   TransportResult
 } from '@elastic/transport'
 
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  ping: {
+    path: [],
+    body: [],
+    query: []
+  }
+}
 
 /**
   * Ping the cluster. Get information about whether the cluster is running.
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/cluster.html | Elasticsearch API documentation}
  */
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
-export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = []
-  const querystring: Record<string, any> = {}
-  const body = undefined
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PingResponse, unknown>>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<T.PingResponse>
+export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise<any> {
+  const {
+    path: acceptedPath
+  } = acceptedParams.ping
+
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
 
   params = params ?? {}
   for (const key in params) {
     if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
+    } else if (key !== 'body' && key !== 'querystring') {
       // @ts-expect-error
       querystring[key] = params[key]
     }
   }
 
diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts
index b5cdda5dd..a3b951cb0 100644
--- a/src/api/api/profiling.ts
+++ b/src/api/api/profiling.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */
 
 /* eslint-disable import/export */
@@ -35,32 +21,71 @@ import {
   TransportResult
 } from '@elastic/transport'
 
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class Profiling {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'profiling.flamegraph': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.stacktraces': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.status': {
+        path: [],
+        body: [],
+        query: []
+      },
+      'profiling.topn_functions': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
   }
 
   /**
    * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/8.19/universal-profiling.html | Elasticsearch API documentation}
    */
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async flamegraph (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.flamegraph']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         querystring[key] = params[key]
       }
     }
@@ -77,19 +102,32 @@ export default class Profiling {
    * Extracts raw stacktrace information from Universal Profiling.
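Editor's aside: the profiling methods are typed with `T.TODO`, so the client gives no compile-time shape here. A minimal sketch calling the parameter-less status endpoint (endpoint placeholder; treat the response as opaque JSON):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// No parameters are accepted; the response shape is not typed by the client.
const status = await client.profiling.status()
console.log(status)
```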
    * @see {@link https://www.elastic.co/guide/en/observability/8.19/universal-profiling.html | Elasticsearch API documentation}
    */
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async stacktraces (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.stacktraces']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
        querystring[key] = params[key]
       }
     }
@@ -106,19 +144,32 @@ export default class Profiling {
    * Returns basic information about the status of Universal Profiling.
    * @see {@link https://www.elastic.co/guide/en/observability/8.19/universal-profiling.html | Elasticsearch API documentation}
    */
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.status']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
        querystring[key] = params[key]
       }
     }
@@ -135,19 +186,32 @@ export default class Profiling {
    * Extracts a list of topN functions from Universal Profiling.
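Editor's aside: because the params are `T.TODO`, any `body` you pass is forwarded essentially verbatim. The request field below is purely illustrative (not taken from this diff or the typed spec); consult the Universal Profiling docs for the real shape.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

const topn = await client.profiling.topnFunctions({
  body: { sample_size: 20000 } // hypothetical request field, for illustration only
})
console.log(topn)
```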
    * @see {@link https://www.elastic.co/guide/en/observability/8.19/universal-profiling.html | Elasticsearch API documentation}
    */
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async topnFunctions (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['profiling.topn_functions']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
        querystring[key] = params[key]
       }
     }
 
diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts
index ac5b92a38..57fd5e20b 100644
--- a/src/api/api/put_script.ts
+++ b/src/api/api/put_script.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */
 
 /* eslint-disable import/export */
@@ -35,27 +21,55 @@ import {
   TransportResult
 } from '@elastic/transport'
 
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  put_script: {
+    path: [
+      'id',
+      'context'
+    ],
+    body: [
+      'script'
+    ],
+    query: [
+      'context',
+      'master_timeout',
+      'timeout'
+    ]
+  }
+}
 
 /**
   * Create or update a script or search template. Creates or updates a stored script or search template.
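Editor's aside: a minimal store-then-use sketch for stored scripts; ids, index name, and the Painless source are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Store a Painless script; `script` is the only body field in acceptedParams above.
await client.putScript({
  id: 'score-boost', // hypothetical script id
  script: { lang: 'painless', source: "_score * params['factor']" }
})

// Reference the stored script from a search.
const resp = await client.search({
  index: 'my-index', // placeholder index
  query: {
    script_score: {
      query: { match_all: {} },
      script: { id: 'score-boost', params: { factor: 2 } }
    }
  }
})
console.log(resp.hits.hits)
```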
  * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/create-stored-script-api.html | Elasticsearch API documentation}
  */
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
-export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
-  const acceptedPath: string[] = ['id', 'context']
-  const acceptedBody: string[] = ['script']
-  const querystring: Record<string, any> = {}
-  // @ts-expect-error
-  const userBody: any = params?.body
-  let body: Record<string, any> | string
-  if (typeof userBody === 'string') {
-    body = userBody
-  } else {
-    body = userBody != null ? { ...userBody } : undefined
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<T.PutScriptResponse>
+export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise<any> {
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.put_script
+
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
  }
 
  for (const key in params) {
@@ -65,9 +79,15 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts
index 3930697a6..862861eed 100644
--- a/src/api/api/query_rules.ts
+++ b/src/api/api/query_rules.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */
 
 /* eslint-disable import/export */
@@ -35,31 +21,121 @@ import {
   TransportResult
 } from '@elastic/transport'
 
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
 
 export default class QueryRules {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'query_rules.delete_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.delete_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.get_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [],
+        query: []
+      },
+      'query_rules.list_rulesets': {
+        path: [],
+        body: [],
+        query: [
+          'from',
+          'size'
+        ]
+      },
+      'query_rules.put_rule': {
+        path: [
+          'ruleset_id',
+          'rule_id'
+        ],
+        body: [
+          'type',
+          'criteria',
+          'actions',
+          'priority'
+        ],
+        query: []
+      },
+      'query_rules.put_ruleset': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'rules'
+        ],
+        query: []
+      },
+      'query_rules.test': {
+        path: [
+          'ruleset_id'
+        ],
+        body: [
+          'match_criteria'
+        ],
+        query: []
+      }
+    }
   }
 
  /**
    * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-query-rule.html | Elasticsearch API documentation}
    */
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRuleResponse>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
-  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest | TB.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRuleResponse>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRuleResponse, unknown>>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRuleResponse>
+  async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_rule']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -81,18 +157,31 @@ export default class QueryRules {
    * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-query-ruleset.html | Elasticsearch API documentation}
    */
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRulesetResponse>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
-  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest | TB.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesDeleteRulesetResponse>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesDeleteRulesetResponse, unknown>>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesDeleteRulesetResponse>
+  async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.delete_ruleset']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -113,18 +202,31 @@ export default class QueryRules {
    * Get a query rule. Get details about a query rule within a query ruleset.
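Editor's aside: a minimal read sketch for the method above; both ids are path parameters and the values here are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

const rule = await client.queryRules.getRule({
  ruleset_id: 'my-ruleset', // hypothetical ruleset id
  rule_id: 'promote-docs'   // hypothetical rule id
})
console.log(rule.type, rule.criteria)
```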
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-query-rule.html | Elasticsearch API documentation}
    */
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRuleResponse>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
-  async getRule (this: That, params: T.QueryRulesGetRuleRequest | TB.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRuleResponse>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRuleResponse, unknown>>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRuleResponse>
+  async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_rule']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -146,18 +248,31 @@ export default class QueryRules {
    * Get a query ruleset. Get details about a query ruleset.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-query-ruleset.html | Elasticsearch API documentation}
    */
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRulesetResponse>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
-  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest | TB.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesGetRulesetResponse>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesGetRulesetResponse, unknown>>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<T.QueryRulesGetRulesetResponse>
+  async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.get_ruleset']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -178,19 +293,32 @@ export default class QueryRules {
    * Get all query rulesets. Get summarized information about the query rulesets.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/list-query-rulesets.html | Elasticsearch API documentation}
    */
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesListRulesetsResponse>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
-  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest | TB.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesListRulesetsResponse>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesListRulesetsResponse, unknown>>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<T.QueryRulesListRulesetsResponse>
+  async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['query_rules.list_rulesets']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
     }
@@ -208,20 +336,27 @@ export default class QueryRules {
    * Create or update a query rule. Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
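Editor's aside: a minimal sketch of the create/update call described above; ids and values are placeholders. Note it pins by `ids` only, since mixing `ids` and `docs` in one rule is rejected.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

await client.queryRules.putRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'promote-docs',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['elastic'] }],
  actions: { ids: ['doc-1', 'doc-2'] } // use `ids` or `docs`, never both
})
```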
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-query-rule.html | Elasticsearch API documentation}
    */
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRuleResponse>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
-  async putRule (this: That, params: T.QueryRulesPutRuleRequest | TB.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['ruleset_id', 'rule_id']
-    const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.QueryRulesPutRuleResponse>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.QueryRulesPutRuleResponse, unknown>>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<T.QueryRulesPutRuleResponse>
+  async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['query_rules.put_rule']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     for (const key in params) {
@@ -231,9 +366,15 @@ export default class QueryRules {
       body[key] = params[key]
     } else if (acceptedPath.includes(key)) {
       continue
-    } else if (key !== 'body') {
-      // @ts-expect-error
-      querystring[key] = params[key]
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
     }
   }
 
@@ -253,20 +394,27 @@ export default class QueryRules {
    * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
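Editor's aside: the whole-ruleset variant can be sketched the same way; `rules` is the single body field, and the names below are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Replaces the ruleset as a whole; up to 100 rules per ruleset by default.
await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset',
  rules: [
    {
      rule_id: 'promote-docs',
      type: 'pinned',
      criteria: [{ type: 'contains', metadata: 'user_query', values: ['elastic'] }],
      actions: { ids: ['doc-1'] }
    }
  ]
})
```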
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-query-ruleset.html | Elasticsearch API documentation} */ - async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise - async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest | TB.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['rules'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['query_rules.put_ruleset'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -276,9 +424,15 @@ export default class QueryRules { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -297,20 +451,27 @@ export default class QueryRules { * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. 
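And a sketch of `test`, which evaluates match criteria against the hypothetical ruleset above without running a search:

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.queryRules.test({
  ruleset_id: 'promo-rules',
  match_criteria: { user_query: 'summer sale' }
})
console.log(res) // lists the rules whose criteria matched
```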
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/test-query-ruleset.html | Elasticsearch API documentation} */ - async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise> - async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise - async test (this: That, params: T.QueryRulesTestRequest | TB.QueryRulesTestRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['match_criteria'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['query_rules.test'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -320,9 +481,15 @@ export default class QueryRules { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index 532af16b9..d0dfaaec8 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,56 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + rank_eval: { + path: [ + 'index' + ], + body: [ + 'requests', + 'metric' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'search_type' + ] + } +} /** * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-rank-eval.html | Elasticsearch API documentation} */ -export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise -export default async function RankEvalApi (this: That, params: T.RankEvalRequest | TB.RankEvalRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['requests', 'metric'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.rank_eval + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +80,15 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 6deb7cb9c..ee1a776dc 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,63 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + reindex: { + path: [], + body: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'size', + 'source' + ], + query: [ + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'max_docs', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } +} /** * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. 
Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. 
By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. 
Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional, and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication, or the password will be sent in plain text. A range of settings is available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma-delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents, you'll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request.
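A sketch of an asynchronous reindex exercising the behavior described above (hypothetical `metrics-old` and `metrics-new` indices; local node assumed):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const { task } = await client.reindex({
  wait_for_completion: false, // run as a task instead of blocking
  conflicts: 'proceed',       // count version conflicts rather than aborting
  source: { index: 'metrics-old' },
  dest: { index: 'metrics-new', op_type: 'create' }, // only create missing docs
  script: {
    lang: 'painless',
    source: "if (ctx._source.status == 'stale') { ctx.op = 'noop' }"
  }
})

// Poll the task record Elasticsearch created for this request.
if (task != null) {
  const status = await client.tasks.get({ task_id: String(task) })
  console.log(status.completed)
}
```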
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-reindex.html | Elasticsearch API documentation} */ -export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise -export default async function ReindexApi (this: That, params: T.ReindexRequest | TB.ReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.reindex + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +87,15 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest | body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index 33c3c0830..263f875ad 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + reindex_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-reindex.html | Elasticsearch API documentation} */ -export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise -export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest | TB.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.reindex_rethrottle + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 03865b259..33532c8d5 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,51 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + render_search_template: { + path: [], + body: [ + 'id', + 'file', + 'params', + 'source' + ], + query: [] + } +} /** * Render a search template. Render a search template as a search request body. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/render-search-template-api.html | Elasticsearch API documentation} */ -export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise -export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'file', 'params', 'source'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.render_search_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -66,9 +76,15 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index fc5f83567..15c13dba3 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,128 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Rollup { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'rollup.delete_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_jobs': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_caps': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_index_caps': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'rollup.put_job': { + path: [ + 'id' + ], + body: [ + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ], + query: [] + }, + 'rollup.rollup_search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'query', + 'size' + ], + query: [ + 'rest_total_hits_as_int', + 'typed_keys' + ] + }, + 'rollup.start_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.stop_job': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + } + } } /** * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. 
IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-delete-job.html | Elasticsearch API documentation} */ - async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise - async deleteJob (this: That, params: T.RollupDeleteJobRequest | TB.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.delete_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,19 +163,32 @@ export default class Rollup { * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. 
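A sketch of that cleanup flow, plus the `getJobs` lookup (hypothetical `sensor` job and `my_rollup_index`; the job must already be stopped):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Deleting the job leaves its rolled-up data in place...
await client.rollup.deleteJob({ id: 'sensor' })

// ...so if the rollup index holds data from several jobs, remove this
// job's documents with a delete-by-query on its identifier.
await client.deleteByQuery({
  index: 'my_rollup_index',
  query: { term: { '_rollup.id': 'sensor' } }
})

// Only active (STARTED or STOPPED) jobs are returned here.
const jobs = await client.rollup.getJobs({ id: '_all' })
```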
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-get-job.html | Elasticsearch API documentation} */ - async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise - async getJobs (this: That, params?: T.RollupGetJobsRequest | TB.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_jobs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -120,19 +216,32 @@ export default class Rollup { * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? 
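A sketch of the capabilities lookup (hypothetical `sensor-*` pattern):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Which rollup jobs rolled up data matching this pattern, which fields
// were rolled up, and which aggregations can be performed on them?
const caps = await client.rollup.getRollupCaps({ id: 'sensor-*' })
```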
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-get-rollup-caps.html | Elasticsearch API documentation} */ - async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise - async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest | TB.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_caps'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -160,18 +269,31 @@ export default class Rollup { * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? 
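And its index-oriented counterpart, inspecting a concrete rollup index (hypothetical `sensor_rollup`):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Which jobs stored their rolled-up data inside this rollup index?
const indexCaps = await client.rollup.getRollupIndexCaps({ index: 'sensor_rollup' })
```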
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-get-rollup-index-caps.html | Elasticsearch API documentation} */ - async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise - async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest | TB.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_index_caps'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -192,20 +314,27 @@ export default class Rollup { * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. 
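A sketch of a job configuration covering the three sections named above, logistics, grouping, and metrics (all names hypothetical):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.rollup.putJob({
  id: 'sensor',
  index_pattern: 'sensor-*',     // logistics: where to read from
  rollup_index: 'sensor_rollup', // logistics: where to write rollups
  cron: '*/30 * * * * ?',        // logistics: how often the job runs
  page_size: 1000,
  groups: {                      // grouping: dimensions queries can use
    date_histogram: { field: 'timestamp', fixed_interval: '1h' },
    terms: { fields: ['node'] }
  },
  metrics: [                     // metrics: what to collect per group
    { field: 'temperature', metrics: ['min', 'max', 'sum'] }
  ]
})

// Jobs are created in the STOPPED state; start one explicitly.
await client.rollup.startJob({ id: 'sensor' })
```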
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-put-job.html | Elasticsearch API documentation} */ - async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise - async putJob (this: That, params: T.RollupPutJobRequest | TB.RollupPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.put_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -215,9 +344,15 @@ export default class Rollup { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -236,20 +371,27 @@ export default class Rollup { * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. 
For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-search.html | Elasticsearch API documentation} */ - async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> - async rollupSearch> (this: That, params: T.RollupRollupSearchRequest | TB.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.rollup_search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -259,9 +401,15 @@ export default class Rollup { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -280,18 +428,31 @@ export default class Rollup { * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. 
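The REST example above, expressed through the rewritten `rollupSearch` (a sketch; same `sensor-1` and `sensor_rollup` names):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Search live and rolled-up data together; `size` must be 0 or omitted.
const resp = await client.rollup.rollupSearch({
  index: 'sensor-1,sensor_rollup',
  size: 0,
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
console.log(resp.aggregations)
```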
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-start-job.html | Elasticsearch API documentation} */ - async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise - async startJob (this: That, params: T.RollupStartJobRequest | TB.RollupStartJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.start_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -312,18 +473,31 @@ export default class Rollup { * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. 
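A sketch of the blocking stop described above (hypothetical `sensor` job):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Block until the indexer has fully stopped, or time out after 10 seconds.
await client.rollup.stopJob({ id: 'sensor', wait_for_completion: true, timeout: '10s' })
```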
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/rollup-stop-job.html | Elasticsearch API documentation} */ - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise - async stopJob (this: That, params: T.RollupStopJobRequest | TB.RollupStopJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['rollup.stop_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index 1c9e534c8..7c9d4233f 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,50 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scripts_painless_execute: { + path: [], + body: [ + 'context', + 'context_setup', + 'script' + ], + query: [] + } +} /** * Run a script. Runs a script and returns a result. 
Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/8.19/painless-execute-api.html | Elasticsearch API documentation} */ -export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> -export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['context', 'context_setup', 'script'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scripts_painless_execute + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -66,9 +75,15 @@ export default async function ScriptsPainlessExecuteApi (this body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 3effaf4f0..5ee078a7c 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scroll: { + path: [], + body: [ + 'scroll', + 'scroll_id' + ], + query: [ + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } +} /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
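As a usage sketch of the scroll flow just described (the initial search carries the `scroll` TTL, subsequent batches are fetched by scroll ID), assuming an index named `my-index`; the final `clearScroll` call is clean-up hygiene rather than a requirement:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Open a scrolling search; `scroll` sets how long the search context is kept.
let page = await client.search({
  index: 'my-index',
  scroll: '1m',
  size: 1000,
  query: { match_all: {} }
})

while (page.hits.hits.length > 0) {
  for (const hit of page.hits.hits) {
    // process hit._source here
  }
  // Fetch the next batch; `scroll_id` and `scroll` are body keys in this rewrite.
  page = await client.scroll({ scroll_id: page._scroll_id!, scroll: '1m' })
}

// Release the search context once done.
if (page._scroll_id != null) {
  await client.clearScroll({ scroll_id: page._scroll_id })
}
```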
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/scroll-api.html | Elasticsearch API documentation} */ -export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise> -export default async function ScrollApi> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll', 'scroll_id'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scroll + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +77,15 @@ export default async function ScrollApi = { + search: { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'min_compatible_shard_node', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } +} /** * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. 
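A quick request sketch against this method, assuming an index named `my-index`. Note the string-form `sort` containing a colon: the rewritten handler below special-cases it and sends it as a query parameter instead of a body key:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

interface Doc { title: string }

// Body-style search. If a `q` query-string parameter were also supplied,
// it would take precedence over the `query` body, per the docs above.
const res = await client.search<Doc>({
  index: 'my-index',
  query: { match: { title: 'vector tiles' } },
  size: 10,
  sort: 'title:asc' // string sorts containing ':' go to the query string
})

console.log(res.hits.hits.map(h => h._source?.title))
```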
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-search.html | Elasticsearch API documentation} */ -export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise> -export default async function SearchApi> (this: That, params?: T.SearchRequest | TB.SearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} for (const key in params) { if (acceptedBody.includes(key)) { body = body ?? 
{} - // @ts-expect-error if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line - // @ts-expect-error querystring[key] = params[key] } else { // @ts-expect-error @@ -72,9 +159,15 @@ export default async function SearchApi +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchApplication { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'search_application.delete': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.delete_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.list': { + path: [], + body: [], + query: [ + 'q', + 'from', + 'size' + ] + }, + 'search_application.post_behavioral_analytics_event': { + path: [ + 'collection_name', + 'event_type' + ], + body: [ + 'payload' + ], + query: [ + 'debug' + ] + }, + 'search_application.put': { + path: [ + 'name' + ], + body: [ + 'search_application' + ], + query: [ + 'create' + ] + }, + 'search_application.put_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.render_query': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [] + }, + 'search_application.search': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [ + 'typed_keys' + ] + } + } } /** * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-search-application.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.SearchApplicationDeleteRequest | TB.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +174,31 @@ export default class SearchApplication { * Delete a behavioral analytics collection. The associated data stream is also deleted. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-analytics-collection.html | Elasticsearch API documentation} */ - async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise - async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest | TB.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete_behavioral_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,18 +219,31 @@ export default class SearchApplication { * Get search application details. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-search-application.html | Elasticsearch API documentation} */ - async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.SearchApplicationGetRequest | TB.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -144,19 +264,32 @@ export default class SearchApplication { * Get behavioral analytics collections. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/list-analytics-collection.html | Elasticsearch API documentation} */ - async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise - async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest | TB.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise + async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.get_behavioral_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -184,19 +317,32 @@ export default class SearchApplication { * Get search applications. Get information about search applications. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/list-analytics-collection.html | Elasticsearch API documentation} */ - async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise - async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -214,25 +360,35 @@ export default class SearchApplication { * Create a behavioral analytics collection event. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/post-analytics-collection-event.html | Elasticsearch API documentation} */ - async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> - async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise - async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest | TB.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['collection_name', 'event_type'] - const acceptedBody: string[] = ['payload'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? 
undefined + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.post_behavioral_analytics_event'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -252,25 +408,35 @@ export default class SearchApplication { * Create or update a search application. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-search-application.html | Elasticsearch API documentation} */ - async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise - async put (this: That, params: T.SearchApplicationPutRequest | TB.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['search_application'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? 
undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -289,18 +455,31 @@ export default class SearchApplication { * Create a behavioral analytics collection. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-analytics-collection.html | Elasticsearch API documentation} */ - async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise - async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest | TB.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['search_application.put_behavioral_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -321,20 +500,27 @@ export default class SearchApplication { * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. 
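To exercise the render path just described, a sketch assuming a search application named `my-search-app` whose template accepts a `query_string` parameter (both names are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Returns the Elasticsearch query the search application would run,
// without executing it. Unset template params fall back to their defaults.
const rendered = await client.searchApplication.renderQuery({
  name: 'my-search-app',
  params: { query_string: 'birthday party decorations' }
})
console.log(rendered)
```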
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-application-render-query.html | Elasticsearch API documentation} */ - async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise - async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest | TB.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.render_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -344,9 +530,15 @@ export default class SearchApplication { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -365,20 +557,27 @@ export default class SearchApplication { * Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or a default template. Unspecified template parameters are assigned their default values if applicable.
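And the corresponding execution path, under the same assumptions as the render sketch above. `params` is the lone body key for `search_application.search`, and `typed_keys` is its accepted query parameter, matching the table registered in the constructor:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Renders the template with `params`, then runs the resulting query.
const result = await client.searchApplication.search({
  name: 'my-search-app',
  params: { query_string: 'birthday party decorations' },
  typed_keys: true // routed to the query string by the key loop
})
console.log(result.hits.hits)
```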
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-application-search.html | Elasticsearch API documentation} */ - async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> - async search> (this: That, params: T.SearchApplicationSearchRequest | TB.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> + async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -388,9 +587,15 @@ export default class SearchApplication { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index 29c092d5b..fb025d7dc 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,76 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_mvt: { + path: [ + 'index', + 'field', + 'zoom', + 'x', + 'y' + ], + body: [ + 'aggs', + 'buffer', + 'exact_bounds', + 'extent', + 'fields', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'query', + 'runtime_mappings', + 'size', + 'sort', + 'track_total_hits', + 'with_labels' + ], + query: [ + 'exact_bounds', + 'extent', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'size', + 'track_total_hits', + 'with_labels' + ] + } +} /** * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary Mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search: ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level.
For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
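Tying the zoom arithmetic above to an actual call: a sketch assuming a `museums` index with a `location` geo field (index, field names, and tile coordinates are illustrative). With zoom 13 and `grid_precision: 2`, the internal `geotile_grid` uses precision 13 + 2 = 15:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch the tile at zoom=13, x=4207, y=2692; `zoom`, `x`, `y`, `index`,
// and `field` are path params, the rest are body keys per the table above.
const tile = await client.searchMvt({
  index: 'museums',
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692,
  grid_precision: 2,
  fields: ['name', 'price'],
  query: { term: { included: true } }
})
// `tile` is a binary Mapbox vector tile (PBF buffer), not JSON.
```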
Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-vector-tile-api.html | Elasticsearch API documentation} */ -export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise -export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] - const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_mvt + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +100,15 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index 1eae24f53..59b0be14a 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,26 +21,59 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + search_shards: { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } +} /** * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-shards.html | Elasticsearch API documentation} */ -export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise -export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.search_shards + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index c20707f66..de6b53aeb 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,68 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_template: { + path: [ + 'index' + ], + body: [ + 'explain', + 'id', + 'params', + 'profile', + 'source' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run a search with a search template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-template-api.html | Elasticsearch API documentation} */ -export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise> -export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -66,9 +93,15 @@ export default async function SearchTemplateApi (this: That body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index b99a0c05e..0e0c2fc7f 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,99 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchableSnapshots { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'searchable_snapshots.cache_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'searchable_snapshots.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + }, + 'searchable_snapshots.mount': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings' + ], + query: [ + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + }, + 'searchable_snapshots.stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'level' + ] + } + } } /** * Get cache statistics. Get statistics about the shared cache for partially mounted indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/searchable-snapshots-api-cache-stats.html | Elasticsearch API documentation} */ - async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise - async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest | TB.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.cache_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -88,19 +141,32 @@ export default class SearchableSnapshots { * Clear the cache. 
Clear indices and data streams from the shared cache for partially mounted indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/searchable-snapshots-api-clear-cache.html | Elasticsearch API documentation} */ - async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise - async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest | TB.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.clear_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -128,20 +194,27 @@ export default class SearchableSnapshots { * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. 
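A usage sketch for the mount API described above; the repository, snapshot, and index names are placeholders, and a local unauthenticated node URL is assumed:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Mount a snapshot as a searchable snapshot index backed by the shared cache.
// 'my_repository', 'my_snapshot', and the index names are hypothetical.
const resp = await client.searchableSnapshots.mount({
  repository: 'my_repository',
  snapshot: 'my_snapshot',
  index: 'my-index',
  renamed_index: 'my-index-mounted',
  storage: 'shared_cache',
  wait_for_completion: true
})
console.log(resp)
```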
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/searchable-snapshots-api-mount-snapshot.html | Elasticsearch API documentation} */ - async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> - async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise - async mount (this: That, params: T.SearchableSnapshotsMountRequest | TB.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['searchable_snapshots.mount'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -151,9 +224,15 @@ export default class SearchableSnapshots { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -173,19 +252,32 @@ export default class SearchableSnapshots { * Get searchable snapshot statistics. 
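A matching sketch for the stats API; the index name is a placeholder and the `level` value is an assumed example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch searchable snapshot statistics, broken down per index.
const resp = await client.searchableSnapshots.stats({
  index: 'my-index-mounted',
  level: 'indices'
})
console.log(resp)
```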
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/searchable-snapshots-api-stats.html | Elasticsearch API documentation} */ - async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise - async stats (this: That, params?: T.SearchableSnapshotsStatsRequest | TB.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const querystring: Record = {} - const body = undefined + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 68064e4ad..368c7a1e5 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,673 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Security { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'security.activate_user_profile': { + path: [], + body: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ], + query: [] + }, + 'security.authenticate': { + path: [], + body: [], + query: [] + }, + 'security.bulk_delete_role': { + path: [], + body: [ + 'names' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_put_role': { + path: [], + body: [ + 'roles' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_update_api_keys': { + path: [], + body: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ], + query: [] + }, + 'security.change_password': { + path: [ + 'username' + ], + body: [ + 'password', + 'password_hash' + ], + query: [ + 'refresh' + ] + }, + 'security.clear_api_key_cache': { + path: [ + 'ids' + ], + body: [], + query: [] + }, + 'security.clear_cached_privileges': { + path: [ + 'application' + ], + body: [], + query: [] + }, + 'security.clear_cached_realms': { + path: [ + 'realms' + ], + body: [], + query: [ + 'usernames' + ] + }, + 'security.clear_cached_roles': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.clear_cached_service_tokens': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [] + }, + 'security.create_api_key': { + path: [], + body: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.create_cross_cluster_api_key': { + path: [], + body: [ + 'access', + 'expiration', + 'metadata', + 'name' + ], + query: [] + }, + 'security.create_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delegate_pki': { + path: [], + body: [ + 'x509_certificate_chain' + ], + query: [] + }, + 'security.delete_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enroll_kibana': { + path: [], + body: [], + query: [] + }, + 'security.enroll_node': { + path: [], + body: [], + query: [] + }, + 'security.get_api_key': { + path: [], + body: [], + query: [ + 'id', + 'name', + 'owner', + 'realm_name', + 'username', + 
'with_limited_by', + 'active_only', + 'with_profile_uid' + ] + }, + 'security.get_builtin_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [] + }, + 'security.get_role': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_service_accounts': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_service_credentials': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'security.get_token': { + path: [], + body: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ], + query: [] + }, + 'security.get_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'with_profile_uid' + ] + }, + 'security.get_user_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'data' + ] + }, + 'security.grant_api_key': { + path: [], + body: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.has_privileges': { + path: [ + 'user' + ], + body: [ + 'application', + 'cluster', + 'index' + ], + query: [] + }, + 'security.has_privileges_user_profile': { + path: [], + body: [ + 'uids', + 'privileges' + ], + query: [] + }, + 'security.invalidate_api_key': { + path: [], + body: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.invalidate_token': { + path: [], + body: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.oidc_authenticate': { + path: [], + body: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ], + query: [] + }, + 'security.oidc_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.oidc_prepare_authentication': { + path: [], + body: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ], + query: [] + }, + 'security.put_privileges': { + path: [], + body: [ + 'privileges' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role': { + path: [ + 'name' + ], + body: [ + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role_mapping': { + path: [ + 'name' + ], + body: [ + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.put_user': { + path: [], + body: [ + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled' + ], + query: [ + 'refresh' + ] + }, + 'security.query_api_keys': { + path: [], + body: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + }, + 'security.query_role': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [] + }, + 'security.query_user': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_profile_uid' + ] + }, + 'security.saml_authenticate': { + path: [], + body: [ + 'content', + 
'ids', + 'realm' + ], + query: [] + }, + 'security.saml_complete_logout': { + path: [], + body: [ + 'realm', + 'ids', + 'query_string', + 'content' + ], + query: [] + }, + 'security.saml_invalidate': { + path: [], + body: [ + 'acs', + 'query_string', + 'realm' + ], + query: [] + }, + 'security.saml_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.saml_prepare_authentication': { + path: [], + body: [ + 'acs', + 'realm', + 'relay_state' + ], + query: [] + }, + 'security.saml_service_provider_metadata': { + path: [ + 'realm_name' + ], + body: [], + query: [] + }, + 'security.suggest_user_profiles': { + path: [], + body: [ + 'name', + 'size', + 'data', + 'hint' + ], + query: [ + 'data' + ] + }, + 'security.update_api_key': { + path: [ + 'id' + ], + body: [ + 'role_descriptors', + 'metadata', + 'expiration' + ], + query: [] + }, + 'security.update_cross_cluster_api_key': { + path: [ + 'id' + ], + body: [ + 'access', + 'expiration', + 'metadata' + ], + query: [] + }, + 'security.update_settings': { + path: [], + body: [ + 'security', + 'security-profile', + 'security-tokens' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'security.update_user_profile_data': { + path: [ + 'uid' + ], + body: [ + 'labels', + 'data' + ], + query: [ + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] + } + } } /** * Activate a user profile. Create or update a user profile on behalf of another user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. 
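A minimal sketch of calling this endpoint with the `password` grant type; the node URL and credentials are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create or update the profile document for the given user.
const profile = await client.security.activateUserProfile({
  grant_type: 'password',
  username: 'jacknich',
  password: 'l0ng-r4nd0m-p@ssw0rd'
})
console.log(profile.uid)
```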
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-activate-user-profile.html | Elasticsearch API documentation} */ - async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise - async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest | TB.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.activate_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -71,9 +697,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,19 +721,32 @@ export default class Security { * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. 
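A sketch of checking whoever the client is currently authenticated as (node URL and credentials are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({
  node: 'http://localhost:9200',
  auth: { username: 'elastic', password: 'changeme' } // placeholder credentials
})

// Returns details about the user the request authenticated as.
const me = await client.security.authenticate()
console.log(me.username, me.roles)
```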
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-authenticate.html | Elasticsearch API documentation} */ - async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise - async authenticate (this: That, params?: T.SecurityAuthenticateRequest | TB.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -119,20 +764,27 @@ export default class Security { * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-bulk-delete-role.html | Elasticsearch API documentation} */ - async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise - async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest | TB.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['names'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_delete_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -142,9 +794,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -160,20 +818,27 @@ export default class Security { * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-bulk-put-role.html | Elasticsearch API documentation} */ - async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise - async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest | TB.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['roles'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_put_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -183,9 +848,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -201,20 +872,27 @@ export default class Security { * Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. 
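A sketch applying one metadata and expiration update to several keys in a single call; the key IDs are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Apply the same update to multiple API keys at once.
const resp = await client.security.bulkUpdateApiKeys({
  ids: ['api-key-id-1', 'api-key-id-2'],
  metadata: { environment: 'production' },
  expiration: '30d'
})
console.log(resp.updated, resp.noops, resp.errors)
```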
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-bulk-update-api-keys.html | Elasticsearch API documentation} */ - async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> - async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise - async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest | TB.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_update_api_keys'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -224,9 +902,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -242,20 +926,27 @@ export default class Security { * Change passwords. Change the passwords of users in the native realm and built-in users. 
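A sketch of rotating a native-realm user's password; the username and password are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Change the password and wait for the change to be visible to searches.
await client.security.changePassword({
  username: 'jacknich',
  password: 'n3w-l0ng-r4nd0m-p@ssw0rd',
  refresh: 'wait_for'
})
```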
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-change-password.html | Elasticsearch API documentation} */ - async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> - async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise - async changePassword (this: That, params?: T.SecurityChangePasswordRequest | TB.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] - const acceptedBody: string[] = ['password', 'password_hash'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.change_password'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -266,9 +957,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -294,18 +991,31 @@ export default class Security { * Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. 
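A one-call sketch; `*` evicts every cached key, while a specific ID (or comma-separated IDs) narrows the eviction:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evict all entries from the API key cache.
await client.security.clearApiKeyCache({ ids: '*' })
```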
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-clear-api-key-cache.html | Elasticsearch API documentation} */ - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise - async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest | TB.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ids'] - const querystring: Record = {} - const body = undefined + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.clear_api_key_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -326,18 +1036,31 @@ export default class Security { * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. 
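A matching sketch; the application name is hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evict cached privileges for a single application.
await client.security.clearCachedPrivileges({ application: 'myapp' })
```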
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-clear-privilege-cache.html | Elasticsearch API documentation} */ - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise - async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest | TB.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application'] - const querystring: Record = {} - const body = undefined + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -358,18 +1081,31 @@ export default class Security { * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. 
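A sketch evicting specific users from one realm's cache; the realm and username are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evict only the listed users from the named realm's user cache.
await client.security.clearCachedRealms({
  realms: 'default_file',
  usernames: ['rdeniro']
})
```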
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-clear-cache.html | Elasticsearch API documentation} */ - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise - async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest | TB.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realms'] - const querystring: Record = {} - const body = undefined + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_realms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -390,18 +1126,31 @@ export default class Security { * Clear the roles cache. Evict roles from the native role cache. 
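A matching sketch; the role name is hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evict a single role from the native role cache.
await client.security.clearCachedRoles({ name: 'my_admin_role' })
```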
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-clear-role-cache.html | Elasticsearch API documentation} */ - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise - async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest | TB.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_roles'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -422,18 +1171,31 @@ export default class Security { * Clear service account token caches. Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. 
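A sketch evicting one service token from both caches; the token name is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Clear a named token for the elastic/fleet-server service account.
await client.security.clearCachedServiceTokens({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'token1'
})
```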
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-clear-service-token-caches.html | Elasticsearch API documentation} */ - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise - async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest | TB.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] - const querystring: Record = {} - const body = undefined + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_service_tokens'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -456,20 +1218,27 @@ export default class Security { * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. 
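A sketch creating a short-lived key with a restricted role; the key name, role name, and index pattern are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create a key that can only read 'index-a*' indices, expiring after one day.
const key = await client.security.createApiKey({
  name: 'my-api-key',
  expiration: '1d',
  role_descriptors: {
    'role-a': {
      cluster: ['all'],
      indices: [{ names: ['index-a*'], privileges: ['read'] }]
    }
  },
  metadata: { application: 'my-application' }
})
console.log(key.encoded) // base64 value usable in an Authorization header
```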
@@ -456,20 +1218,27 @@ export default class Security {
    * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-create-api-key.html | Elasticsearch API documentation}
    */
-  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
-  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateApiKeyResponse, unknown>>
-  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateApiKeyResponse>
-  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest | TB.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateApiKeyResponse>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateApiKeyResponse, unknown>>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateApiKeyResponse>
+  async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.create_api_key']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     params = params ?? {}
@@ -480,9 +1249,15 @@ export default class Security {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }

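The two hunks above introduce the routing rule every body-accepting method now follows: declared body params and unknown keys go to the request body, declared query params and the common params go to the querystring. An illustrative re-implementation of that rule as a standalone function — `routeParams` is a hypothetical helper for explanation only, not something the client exports:

```ts
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

interface AcceptedParams { path: string[], body: string[], query: string[] }

function routeParams (accepted: AcceptedParams, params: Record<string, any>): {
  querystring: Record<string, any>
  body: Record<string, any>
} {
  const querystring: Record<string, any> = {}
  const body: Record<string, any> = {}
  for (const key in params) {
    if (accepted.path.includes(key)) continue // consumed by URL building
    if (accepted.body.includes(key)) {
      body[key] = params[key]
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // unknown keys now default to the body
    }
  }
  return { querystring, body }
}
```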
@@ -498,20 +1273,27 @@ export default class Security {
    * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key's effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-create-cross-cluster-api-key.html | Elasticsearch API documentation}
    */
-  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateCrossClusterApiKeyResponse>
-  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateCrossClusterApiKeyResponse, unknown>>
-  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateCrossClusterApiKeyResponse>
-  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest | TB.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateCrossClusterApiKeyResponse>
+  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateCrossClusterApiKeyResponse, unknown>>
+  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateCrossClusterApiKeyResponse>
+  async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.create_cross_cluster_api_key']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -521,9 +1303,15 @@ export default class Security {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }

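A usage sketch for the refactored method, reusing the `client` from the earlier sketch; the key name and index pattern are illustrative. `access` is required for cross-cluster keys and is routed to the request body because it is listed under `acceptedParams['security.create_cross_cluster_api_key'].body`:

```ts
const key = await client.security.createCrossClusterApiKey({
  name: 'remote-search-key', // illustrative name
  access: {
    search: [{ names: ['logs-*'] }] // illustrative index pattern
  }
})
console.log(key.id, key.encoded)
```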
@@ -539,18 +1327,31 @@ export default class Security {
    * Create a service account token. Create a service account token for access without requiring basic authentication. NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-create-service-token.html | Elasticsearch API documentation}
    */
-  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
-  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateServiceTokenResponse, unknown>>
-  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateServiceTokenResponse>
-  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest | TB.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityCreateServiceTokenResponse>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityCreateServiceTokenResponse, unknown>>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityCreateServiceTokenResponse>
+  async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.create_service_token']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
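Sketch of a call against the built-in `elastic/fleet-server` service account (the token name is a placeholder). All three identifiers are path params, so they end up in the URL rather than the querystring:

```ts
const token = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-token' // placeholder token name
})
console.log(token.token.value)
```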
@@ -580,20 +1381,27 @@ export default class Security {
    * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delegate-pki-authentication.html | Elasticsearch API documentation}
    */
-  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDelegatePkiResponse>
-  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDelegatePkiResponse, unknown>>
-  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise<T.SecurityDelegatePkiResponse>
-  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest | TB.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['x509_certificate_chain']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDelegatePkiResponse>
+  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDelegatePkiResponse, unknown>>
+  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise<T.SecurityDelegatePkiResponse>
+  async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['security.delegate_pki']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }

     for (const key in params) {
@@ -603,9 +1411,15 @@ export default class Security {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
     }

@@ -621,18 +1435,31 @@ export default class Security {
    * Delete application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delete-privilege.html | Elasticsearch API documentation}
    */
-  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
-  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeletePrivilegesResponse, unknown>>
-  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityDeletePrivilegesResponse>
-  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest | TB.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['application', 'name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeletePrivilegesResponse>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeletePrivilegesResponse, unknown>>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityDeletePrivilegesResponse>
+  async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.delete_privileges']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -654,18 +1481,31 @@ export default class Security {
    * Delete roles. Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delete-role.html | Elasticsearch API documentation}
    */
-  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
-  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleResponse, unknown>>
-  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleResponse>
-  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest | TB.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleResponse>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleResponse, unknown>>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleResponse>
+  async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.delete_role']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
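Sketch showing how a non-path key is routed: `name` is a path param, while `refresh` is not, so under the loop above it lands in the querystring (`?refresh=wait_for`). The role name is a placeholder:

```ts
const resp = await client.security.deleteRole({
  name: 'my_role',       // path param, consumed for the URL
  refresh: 'wait_for'    // not a path param, serialized as a query param
})
console.log(resp.found)
```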
@@ -686,18 +1526,31 @@ export default class Security {
    * Delete role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delete-role-mapping.html | Elasticsearch API documentation}
    */
-  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
-  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleMappingResponse, unknown>>
-  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleMappingResponse>
-  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest | TB.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteRoleMappingResponse>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteRoleMappingResponse, unknown>>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteRoleMappingResponse>
+  async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.delete_role_mapping']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -718,18 +1571,31 @@ export default class Security {
    * Delete service account tokens. Delete service account tokens for a service in a specified namespace.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delete-service-token.html | Elasticsearch API documentation}
    */
-  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
-  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteServiceTokenResponse, unknown>>
-  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteServiceTokenResponse>
-  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest | TB.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['namespace', 'service', 'name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteServiceTokenResponse>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteServiceTokenResponse, unknown>>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteServiceTokenResponse>
+  async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.delete_service_token']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -752,18 +1618,31 @@ export default class Security {
    * Delete users. Delete users from the native realm.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-delete-user.html | Elasticsearch API documentation}
    */
-  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
-  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteUserResponse, unknown>>
-  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteUserResponse>
-  async deleteUser (this: That, params: T.SecurityDeleteUserRequest | TB.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['username']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDeleteUserResponse>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDeleteUserResponse, unknown>>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDeleteUserResponse>
+  async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.delete_user']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -784,18 +1663,31 @@ export default class Security {
    * Disable users. Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-disable-user.html | Elasticsearch API documentation}
    */
-  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
-  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserResponse, unknown>>
-  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserResponse>
-  async disableUser (this: That, params: T.SecurityDisableUserRequest | TB.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['username']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserResponse>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserResponse, unknown>>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserResponse>
+  async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.disable_user']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
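Sketch pairing this call with its counterpart further down in the diff; `jacknich` is the example username used in the Elasticsearch docs, not a real account:

```ts
// Revoke access for a native-realm user...
await client.security.disableUser({ username: 'jacknich' })
// ...and restore it later.
await client.security.enableUser({ username: 'jacknich' })
```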
@@ -816,18 +1708,31 @@ export default class Security {
    * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. To re-enable a disabled user profile, use the enable user profile API.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-disable-user-profile.html | Elasticsearch API documentation}
    */
-  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
-  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserProfileResponse, unknown>>
-  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserProfileResponse>
-  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest | TB.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['uid']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityDisableUserProfileResponse>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityDisableUserProfileResponse, unknown>>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityDisableUserProfileResponse>
+  async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.disable_user_profile']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -848,18 +1753,31 @@ export default class Security {
    * Enable users. Enable users in the native realm. By default, when you create users, they are enabled.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-enable-user.html | Elasticsearch API documentation}
    */
-  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
-  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserResponse, unknown>>
-  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserResponse>
-  async enableUser (this: That, params: T.SecurityEnableUserRequest | TB.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['username']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserResponse>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserResponse, unknown>>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserResponse>
+  async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.enable_user']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -880,18 +1798,31 @@ export default class Security {
    * Enable a user profile. Enable user profiles to make them visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-enable-user-profile.html | Elasticsearch API documentation}
    */
-  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
-  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserProfileResponse, unknown>>
-  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserProfileResponse>
-  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest | TB.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['uid']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnableUserProfileResponse>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnableUserProfileResponse, unknown>>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<T.SecurityEnableUserProfileResponse>
+  async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.enable_user_profile']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -912,19 +1843,32 @@ export default class Security {
    * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-kibana-enrollment.html | Elasticsearch API documentation}
    */
-  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollKibanaResponse>
-  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollKibanaResponse, unknown>>
-  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollKibanaResponse>
-  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest | TB.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollKibanaResponse>
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollKibanaResponse, unknown>>
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollKibanaResponse>
+  async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.enroll_kibana']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -942,19 +1886,32 @@ export default class Security {
    * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-node-enrollment.html | Elasticsearch API documentation}
    */
-  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollNodeResponse>
-  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollNodeResponse, unknown>>
-  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollNodeResponse>
-  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest | TB.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityEnrollNodeResponse>
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityEnrollNodeResponse, unknown>>
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise<T.SecurityEnrollNodeResponse>
+  async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.enroll_node']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -972,19 +1929,32 @@ export default class Security {
    * Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-api-key.html | Elasticsearch API documentation}
    */
-  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetApiKeyResponse>
-  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetApiKeyResponse, unknown>>
-  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGetApiKeyResponse>
-  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest | TB.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetApiKeyResponse>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetApiKeyResponse, unknown>>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<T.SecurityGetApiKeyResponse>
+  async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_api_key']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
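Sketch of the `querystring` escape hatch added by this refactor: keys under `params.querystring` are copied verbatim into the URL query, bypassing the accepted-params routing. Note this is an assumption-level example; depending on how the request types declare `querystring`, a cast may be needed for it to typecheck:

```ts
const keys = await client.security.getApiKey({
  owner: true, // routed normally
  querystring: { active_only: true } // passed through as ?active_only=true
})
console.log(keys.api_keys.length)
```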
@@ -1002,19 +1972,32 @@ export default class Security {
    * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-builtin-privileges.html | Elasticsearch API documentation}
    */
-  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
-  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetBuiltinPrivilegesResponse, unknown>>
-  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetBuiltinPrivilegesResponse>
-  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest | TB.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetBuiltinPrivilegesResponse>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetBuiltinPrivilegesResponse, unknown>>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetBuiltinPrivilegesResponse>
+  async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_builtin_privileges']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1032,19 +2015,32 @@ export default class Security {
    * Get application privileges. To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-privileges.html | Elasticsearch API documentation}
    */
-  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
-  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetPrivilegesResponse, unknown>>
-  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetPrivilegesResponse>
-  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest | TB.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['application', 'name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetPrivilegesResponse>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetPrivilegesResponse, unknown>>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<T.SecurityGetPrivilegesResponse>
+  async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_privileges']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1076,19 +2072,32 @@ export default class Security {
    * Get roles. Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-role.html | Elasticsearch API documentation}
    */
-  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
-  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleResponse, unknown>>
-  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleResponse>
-  async getRole (this: That, params?: T.SecurityGetRoleRequest | TB.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleResponse>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleResponse, unknown>>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleResponse>
+  async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_role']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1116,19 +2125,32 @@ export default class Security {
    * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-role-mapping.html | Elasticsearch API documentation}
    */
-  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
-  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleMappingResponse, unknown>>
-  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleMappingResponse>
-  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest | TB.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetRoleMappingResponse>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetRoleMappingResponse, unknown>>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<T.SecurityGetRoleMappingResponse>
+  async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_role_mapping']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1156,19 +2178,32 @@ export default class Security {
    * Get service accounts. Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-service-accounts.html | Elasticsearch API documentation}
    */
-  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
-  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceAccountsResponse, unknown>>
-  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceAccountsResponse>
-  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest | TB.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['namespace', 'service']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceAccountsResponse>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceAccountsResponse, unknown>>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceAccountsResponse>
+  async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_service_accounts']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
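Sketch: both identifiers are optional path params, so omitting them lists every service account (currently only `elastic/fleet-server`):

```ts
const accounts = await client.security.getServiceAccounts()
console.log(Object.keys(accounts)) // e.g. ['elastic/fleet-server']
```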
@@ -1200,18 +2235,31 @@ export default class Security {
    * Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-service-credentials.html | Elasticsearch API documentation}
    */
-  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
-  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceCredentialsResponse, unknown>>
-  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceCredentialsResponse>
-  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest | TB.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['namespace', 'service']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetServiceCredentialsResponse>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetServiceCredentialsResponse, unknown>>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetServiceCredentialsResponse>
+  async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_service_credentials']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -1233,19 +2281,32 @@ export default class Security {
    * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes: * `index.auto_expand_replicas` * `index.number_of_replicas`
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-settings.html | Elasticsearch API documentation}
    */
-  async getSettings (this: That, params?: T.SecurityGetSettingsRequest | TB.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetSettingsResponse>
-  async getSettings (this: That, params?: T.SecurityGetSettingsRequest | TB.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetSettingsResponse, unknown>>
-  async getSettings (this: That, params?: T.SecurityGetSettingsRequest | TB.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetSettingsResponse>
-  async getSettings (this: That, params?: T.SecurityGetSettingsRequest | TB.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SecurityGetSettingsResponse>
+  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SecurityGetSettingsResponse, unknown>>
+  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise<T.SecurityGetSettingsResponse>
+  async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['security.get_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
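Sketch: reading the user-configurable settings of the security system indices (no params needed):

```ts
const settings = await client.security.getSettings()
console.log(settings.security) // settings for the .security index
```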
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-token.html | Elasticsearch API documentation} */ - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise - async getToken (this: That, params?: T.SecurityGetTokenRequest | TB.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.get_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1287,9 +2355,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1305,19 +2379,32 @@ export default class Security { * Get users. Get information about users in the native realm and built-in users. 
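A sketch of the password grant against `getToken`; under the new flattening rules `grant_type`, `username`, and `password` are routed into the request body because they appear in `acceptedBody` (credentials are placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const token = await client.security.getToken({
  grant_type: 'password',
  username: 'jdoe',        // hypothetical native-realm user
  password: 'REPLACE_ME'
})
// access_token is a bearer token; the refresh token is single-use and valid for 24 hours.
console.log(token.access_token, token.expires_in)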
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-user.html | Elasticsearch API documentation} */ - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise - async getUser (this: That, params?: T.SecurityGetUserRequest | TB.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] - const querystring: Record = {} - const body = undefined + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.get_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1345,19 +2432,32 @@ export default class Security { * Get user privileges. Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. 
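`getUser` sketched; `username` is a path parameter that may name one user, several, or be omitted to list all (the user below is invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

// Omit `username` entirely to fetch every native and built-in user.
const users = await client.security.getUser({ username: 'jdoe' })
console.log(users.jdoe?.roles)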
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-user-privileges.html | Elasticsearch API documentation} */ - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise - async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest | TB.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1375,18 +2475,31 @@ export default class Security { * Get a user profile. Get a user's profile using the unique profile ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
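`getUserPrivileges` takes no arguments and, per the description above, reports only the caller's own privileges; a sketch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const mine = await client.security.getUserPrivileges()
console.log(mine.cluster, mine.indices)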
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-get-user-profile.html | Elasticsearch API documentation} */ - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise - async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest | TB.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] - const querystring: Record = {} - const body = undefined + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -1407,20 +2520,27 @@ export default class Security { * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are: * username and password * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. 
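A sketch of `getUserProfile`; the `uid` is a placeholder, and as the note above says, the profile APIs are reserved for Kibana and the Elastic solutions:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const profile = await client.security.getUserProfile({
  uid: 'REPLACE_WITH_PROFILE_UID'  // the unique profile ID, not the username
})
console.log(profile.profiles)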
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-grant-api-key.html | Elasticsearch API documentation} */ - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise - async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest | TB.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.grant_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1430,9 +2550,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1448,20 +2574,27 @@ export default class Security { * Check user privileges. Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. 
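`grantApiKey` sketched with the password credential type: the caller authenticates as itself but supplies the end user's credentials, and the key is created on that user's behalf (all values hypothetical):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'end.user',
  password: 'REPLACE_ME',
  api_key: { name: 'on-behalf-of-key' }  // expiration omitted: keys never expire by default
})
console.log(granted.id, granted.api_key)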
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-has-privileges.html | Elasticsearch API documentation} */ - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise - async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest | TB.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['user'] - const acceptedBody: string[] = ['application', 'cluster', 'index'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1472,9 +2605,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1500,20 +2639,27 @@ export default class Security { * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
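`hasPrivileges` sketched, checking the caller's own cluster and index privileges (the index pattern is illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read'] }]
})
console.log(check.has_all_requested)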
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-has-privileges-user-profile.html | Elasticsearch API documentation} */ - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise - async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest | TB.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['uids', 'privileges'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1523,9 +2669,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1541,20 +2693,27 @@ export default class Security { * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. 
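`hasPrivilegesUserProfile` sketched; the uid is a placeholder and, per the note, this endpoint is meant for Kibana-class consumers:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const result = await client.security.hasPrivilegesUserProfile({
  uids: ['REPLACE_WITH_PROFILE_UID'],
  privileges: { cluster: ['monitor'] }  // may also carry index and application checks
})
console.log(result.has_privilege_uids)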
The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-invalidate-api-key.html | Elasticsearch API documentation} */ - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise - async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest | TB.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1565,9 +2724,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1583,20 +2748,27 @@ export default class Security { * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. 
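`invalidateApiKey` sketched using the `owner=true` form called out above, which needs only the `manage_own_api_key` privilege:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

// Invalidate every API key owned by the calling user.
const res = await client.security.invalidateApiKey({ owner: true })
console.log(res.invalidated_api_keys, res.error_count)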
If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-invalidate-token.html | Elasticsearch API documentation} */ - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise - async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest | TB.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1607,9 +2779,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1625,20 +2803,27 @@ export default class Security { * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
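`invalidateToken` sketched for the refresh-token case; as the description notes, at least one of `token` or `refresh_token` (or `realm_name`/`username`) must be supplied:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const res = await client.security.invalidateToken({
  refresh_token: 'REPLACE_WITH_REFRESH_TOKEN'
})
console.log(res.invalidated_tokens)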
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-oidc-authenticate.html | Elasticsearch API documentation} */ - async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise - async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest | TB.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1648,9 +2833,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1666,20 +2857,27 @@ export default class Security { * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
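`oidcAuthenticate` sketched for a non-Kibana relying party; the redirect URI, state, and nonce must echo the values from the earlier prepare-authentication step (all placeholders, realm name invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const tokens = await client.security.oidcAuthenticate({
  redirect_uri: 'REPLACE_WITH_CALLBACK_URL',  // the full URL the OP redirected back to
  state: 'REPLACE_WITH_STATE',
  nonce: 'REPLACE_WITH_NONCE',
  realm: 'oidc1'
})
console.log(tokens.access_token, tokens.refresh_token)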
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-oidc-logout.html | Elasticsearch API documentation} */ - async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise - async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest | TB.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1689,9 +2887,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1707,20 +2911,27 @@ export default class Security { * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
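`oidcLogout` sketched with the token pair obtained from `/_security/oidc/authenticate` (placeholders):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const out = await client.security.oidcLogout({
  token: 'REPLACE_WITH_ACCESS_TOKEN',
  refresh_token: 'REPLACE_WITH_REFRESH_TOKEN'
})
console.log(out.redirect)  // end-session URI of the OP, if the realm is configured for single logout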
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-oidc-prepare-authentication.html | Elasticsearch API documentation} */ - async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise - async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest | TB.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_prepare_authentication'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1731,9 +2942,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1749,25 +2966,35 @@ export default class Security { * Create or update application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. Application names are formed from a prefix, with an optional suffix that conform to the following rules: * The prefix must begin with a lowercase ASCII letter. * The prefix must contain only ASCII letters or digits. * The prefix must be at least 3 characters long. * If the suffix exists, it must begin with either a dash `-` or `_`. 
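`oidcPrepareAuthentication` sketched for a realm-initiated flow; naming the realm is enough, and Elasticsearch generates the state and nonce (realm name invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const prep = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })
// `redirect` points at the OP's authorization endpoint; keep state/nonce for the callback.
console.log(prep.redirect, prep.state, prep.nonce)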
* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-put-privileges.html | Elasticsearch API documentation} */ - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise - async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest | TB.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['privileges'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_privileges'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1783,20 +3010,27 @@ export default class Security { * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. 
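`putPrivileges` sketched; note in the diff that the single `privileges` body key is assigned wholesale (`body = params[key]`) rather than spread, since it is the entire request body. Application and privilege names below follow the naming rules above but are invented:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

await client.security.putPrivileges({
  privileges: {
    myapp: {
      read: { actions: ['data:read/*'] }  // action names must contain one of / * :
    }
  }
})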
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-put-role.html | Elasticsearch API documentation} */ - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise - async putRole (this: That, params: T.SecurityPutRoleRequest | TB.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1806,9 +3040,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1827,20 +3067,27 @@ export default class Security { * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. **Role templates** The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. 
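`putRole` sketched with a minimal native-realm role (name and patterns illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const role = await client.security.putRole({
  name: 'logs_reader',
  cluster: ['monitor'],
  indices: [{ names: ['logs-*'], privileges: ['read'] }]
})
console.log(role.role.created)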
For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-put-role-mapping.html | Elasticsearch API documentation} */ - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise - async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest | TB.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1850,9 +3097,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1871,20 +3124,27 @@ export default class Security { * Create or update users. Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-put-user.html | Elasticsearch API documentation} */ - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise - async putUser (this: That, params: T.SecurityPutUserRequest | TB.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -1894,9 +3154,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1915,20 +3181,27 @@ export default class Security { * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
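Tying the role-mapping description above to the new calling convention, a sketch of `putRoleMapping` using the LDAP group from the docs' example, plus the `role_templates` variant (mapping names invented; realm name hypothetical):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

// Fixed role name for a known group.
await client.security.putRoleMapping({
  name: 'ldap-admins',
  enabled: true,
  roles: ['superuser'],
  rules: { field: { groups: 'cn=admin,dc=example,dc=com' } }
})

// Mustache template producing a role name derived from the username.
await client.security.putRoleMapping({
  name: 'templated-users',
  enabled: true,
  role_templates: [{ template: { source: 'user_{{username}}' } }],
  rules: { field: { 'realm.name': 'ldap1' } }
})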
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-query-api-key.html | Elasticsearch API documentation} */ - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise - async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest | TB.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_api_keys'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1939,9 +3212,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1957,20 +3236,27 @@ export default class Security { * Find roles with a query. Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. 
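`queryApiKeys` sketched; under the new param routing, `query`, `size`, `sort`, and the other `acceptedBody` keys land in the request body:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const keys = await client.security.queryApiKeys({
  query: { term: { invalidated: false } },  // fields such as name, username, invalidated are queryable
  size: 10,
  sort: ['creation']
})
console.log(keys.total, keys.api_keys.length)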
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-query-role.html | Elasticsearch API documentation} */ - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise - async queryRole (this: That, params?: T.SecurityQueryRoleRequest | TB.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -1981,9 +3267,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1999,20 +3291,27 @@ export default class Security { * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. 
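`queryRole` sketched with pagination, matching the "paginated and sorted" behavior described above (the filter value is invented):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REPLACE_ME' } })

const roles = await client.security.queryRole({
  query: { match: { name: 'logs' } },
  sort: ['name'],
  from: 0,
  size: 20
})
console.log(roles.total, roles.roles.map(r => r.name))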
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-query-user.html | Elasticsearch API documentation} */ - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise - async queryUser (this: That, params?: T.SecurityQueryUserRequest | TB.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2023,9 +3322,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2041,20 +3346,27 @@ export default class Security { * Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML message that is submitted can be: * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. 
This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-authenticate.html | Elasticsearch API documentation} */ - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise - async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest | TB.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['content', 'ids', 'realm'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2064,9 +3376,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2082,20 +3400,27 @@ export default class Security { * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. 
The caller of this API must prepare the request accordingly so that this API can handle either of them. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-complete-logout.html | Elasticsearch API documentation} */ - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise - async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest | TB.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_complete_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2105,9 +3430,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2123,20 +3454,27 @@ export default class Security { * Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. 
Thus the user can be redirected back to their IdP. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-invalidate.html | Elasticsearch API documentation} */ - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise - async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest | TB.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'query_string', 'realm'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_invalidate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2146,9 +3484,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2164,20 +3508,27 @@ export default class Security { * Logout of SAML. Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). 
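* @example
* // A hedged usage sketch: the token strings are placeholders for values previously
* // returned by the SAML authenticate API; assumes an initialized `client`.
* const resp = await client.security.samlLogout({
*   token: '<access-token>',
*   refresh_token: '<refresh-token>'
* })
* console.log(resp.redirect) // IdP URL starting the SP-initiated Single Logout, if configured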
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-logout.html | Elasticsearch API documentation} */ - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise - async samlLogout (this: That, params: T.SecuritySamlLogoutRequest | TB.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2187,9 +3538,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2205,20 +3562,27 @@ export default class Security { * Prepare SAML authentication. Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. 
The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-prepare-authentication.html | Elasticsearch API documentation} */ - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise - async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest | TB.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_prepare_authentication'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2229,9 +3593,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2247,18 +3617,31 @@ export default class Security { * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. 
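* @example
* // A hedged usage sketch; 'saml1' is an illustrative realm name and `client` is
* // assumed to be an initialized Client.
* const resp = await client.security.samlServiceProviderMetadata({ realm_name: 'saml1' })
* console.log(resp.metadata) // SAML 2.0 Service Provider metadata as an XML string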
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-saml-sp-metadata.html | Elasticsearch API documentation} */ - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise - async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest | TB.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realm_name'] - const querystring: Record = {} - const body = undefined + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['security.saml_service_provider_metadata'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -2279,20 +3662,27 @@ export default class Security { * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
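* @example
* // A hedged usage sketch (assumes an initialized `client`): `name` is matched against
* // the name-related fields of profile-enabled users; the value is illustrative.
* const resp = await client.security.suggestUserProfiles({ name: 'jack', size: 5 })
* console.log(resp.profiles)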
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-suggest-user-profile.html | Elasticsearch API documentation} */ - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise - async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest | TB.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.suggest_user_profiles'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2303,9 +3693,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2321,20 +3717,27 @@ export default class Security { * Update an API key. Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata. To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. 
It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-update-api-key.html | Elasticsearch API documentation} */ - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise - async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest | TB.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2344,9 +3747,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2365,20 +3774,27 @@ export default class Security { * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. 
To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-update-cross-cluster-api-key.html | Elasticsearch API documentation} */ - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise - async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest | TB.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['access', 'expiration', 'metadata'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_cross_cluster_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2388,9 +3804,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2409,20 +3831,27 @@ export default class Security { * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-update-settings.html | Elasticsearch API documentation} */ - async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest | TB.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest | TB.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest | TB.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise - async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest | TB.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -2433,9 +3862,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2451,20 +3886,27 @@ export default class Security { * Update user profile data. Update specific data for the user profile that is associated with a unique ID. 
NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-update-user-profile-data.html | Elasticsearch API documentation} */ - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise - async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest | TB.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] - const acceptedBody: string[] = ['labels', 'data'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_user_profile_data'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -2474,9 +3916,15 @@ export default class Security { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index 543c90992..4b1bc2f2d 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,86 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Shutdown { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'shutdown.delete_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'shutdown.get_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'shutdown.put_node': { + path: [ + 'node_id' + ], + body: [ + 'type', + 'reason', + 'allocation_delay', + 'target_node_name' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. 
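* @example
* // A hedged usage sketch; 'node-1' stands in for the persistent node ID and
* // `client` is assumed to be an initialized Client with operator privileges.
* await client.shutdown.deleteNode({ node_id: 'node-1' })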
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-shutdown.html | Elasticsearch API documentation} */ - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise - async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest | TB.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['shutdown.delete_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,19 +121,32 @@ export default class Shutdown { * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. 
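* @example
* // A hedged usage sketch, assuming an initialized `client`: omit `node_id` to get
* // the shutdown status of every node in the cluster.
* const resp = await client.shutdown.getNode()
* console.log(resp.nodes)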
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-shutdown.html | Elasticsearch API documentation} */ - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise - async getNode (this: That, params?: T.ShutdownGetNodeRequest | TB.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const querystring: Record = {} - const body = undefined + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['shutdown.get_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -120,20 +174,27 @@ export default class Shutdown { * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. 
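* @example
* // A hedged usage sketch; the node ID and reason are illustrative, and `client` is
* // assumed to be an initialized Client. `allocation_delay` applies to `restart` only.
* await client.shutdown.putNode({
*   node_id: 'node-1',
*   type: 'restart',
*   reason: 'Planned OS patching',
*   allocation_delay: '10m'
* })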
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-shutdown.html | Elasticsearch API documentation} */ - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise - async putNode (this: That, params: T.ShutdownPutNodeRequest | TB.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['shutdown.put_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -143,9 +204,15 @@ export default class Shutdown { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index fbda86764..d2a15b84f 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,63 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Simulate { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'simulate.ingest': { + path: [ + 'index' + ], + body: [ + 'docs', + 'component_template_substitutions', + 'index_template_substitutions', + 'mapping_addition', + 'pipeline_substitutions' + ], + query: [ + 'pipeline' + ] + } + } } /** * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. 
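* @example
* // A hedged usage sketch (index, pipeline name, and processor are illustrative;
* // assumes an initialized `client`). Nothing is indexed by this call.
* const resp = await client.simulate.ingest({
*   index: 'my-index',
*   docs: [{ _source: { message: 'hello world' } }],
*   pipeline_substitutions: {
*     'my-pipeline': { processors: [{ set: { field: 'simulated', value: 'true' } }] }
*   }
* })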
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/simulate-ingest-api.html | Elasticsearch API documentation} */ - async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> - async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise - async ingest (this: That, params: T.SimulateIngestRequest | TB.SimulateIngestRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_substitutions', 'mapping_addition', 'pipeline_substitutions'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['simulate.ingest'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -71,9 +87,15 @@ export default class Simulate { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 492bd1861..27b59d5e6 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,138 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Slm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'slm.delete_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_retention': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_status': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.put_lifecycle': { + path: [ + 'policy_id' + ], + body: [ + 'config', + 'name', + 'repository', + 'retention', + 'schedule' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-delete-policy.html | Elasticsearch API documentation} */ - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise - async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest | TB.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.delete_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +173,31 @@ export default class Slm { * Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-execute-lifecycle.html | Elasticsearch API documentation} */ - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise - async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest | TB.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -112,19 +218,32 @@ export default class Slm { * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. 
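 * A hedged sketch of a call (same assumed `client` as the earlier sketch; `master_timeout` is one of the query parameters registered under `slm.execute_retention` above):
 *
 * ```
 * const response = await client.slm.executeRetention({ master_timeout: '30s' })
 * ```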
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-execute-retention.html | Elasticsearch API documentation} */ - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise - async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest | TB.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_retention'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -142,19 +261,32 @@ export default class Slm { * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. 
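 * For example (illustrative policy ID; `policy_id` is optional, so omitting it should return all policies):
 *
 * ```
 * const policies = await client.slm.getLifecycle({ policy_id: 'daily-snapshots' })
 * ```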
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-get-policy.html | Elasticsearch API documentation} */ - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise - async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest | TB.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const querystring: Record = {} - const body = undefined + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.get_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -182,19 +314,32 @@ export default class Slm { * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-get-stats.html | Elasticsearch API documentation} */ - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise - async getStats (this: That, params?: T.SlmGetStatsRequest | TB.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.get_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -212,19 +357,32 @@ export default class Slm { * Get the snapshot lifecycle management status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-get-status.html | Elasticsearch API documentation} */ - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise - async getStatus (this: That, params?: T.SlmGetStatusRequest | TB.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -242,20 +400,27 @@ export default class Slm { * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. 
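 * A sketch of a create-or-update call (illustrative values; the body keys are those registered under `slm.put_lifecycle` above):
 *
 * ```
 * await client.slm.putLifecycle({
 *   policy_id: 'daily-snapshots', // path parameter
 *   name: '<daily-snap-{now/d}>',
 *   schedule: '0 30 1 * * ?', // cron expression
 *   repository: 'my_repository',
 *   config: { indices: ['data-*'], ignore_unavailable: true },
 *   retention: { expire_after: '30d', min_count: 5, max_count: 50 }
 * })
 * ```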
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-put-policy.html | Elasticsearch API documentation} */ - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise - async putLifecycle (this: That, params: T.SlmPutLifecycleRequest | TB.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['slm.put_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -265,9 +430,15 @@ export default class Slm { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -286,19 +457,32 @@ export default class Slm { * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. 
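 * For example (no path or body parameters; only the `master_timeout` and `timeout` query parameters apply):
 *
 * ```
 * await client.slm.start()
 * ```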
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-start.html | Elasticsearch API documentation} */ - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise - async start (this: That, params?: T.SlmStartRequest | TB.SlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.start'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -316,19 +500,32 @@ export default class Slm { * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. 
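 * A sketch pairing the stop call with the status check the description mentions (illustrative):
 *
 * ```
 * await client.slm.stop()
 * const { operation_mode } = await client.slm.getStatus() // expect 'STOPPING', then 'STOPPED'
 * ```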
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/slm-api-stop.html | Elasticsearch API documentation} */ - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise - async stop (this: That, params?: T.SlmStopRequest | TB.SlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['slm.stop'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 39d060ae1..8d6c4efec 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,238 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Snapshot { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'snapshot.cleanup_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.clone': { + path: [ + 'repository', + 'snapshot', + 'target_snapshot' + ], + body: [ + 'indices' + ], + query: [ + 'master_timeout' + ] + }, + 'snapshot.create': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'feature_states', + 'metadata', + 'partial' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.create_repository': { + path: [ + 'name' + ], + body: [ + 'repository' + ], + query: [ + 'master_timeout', + 'timeout', + 'verify' + ] + }, + 'snapshot.delete': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.delete_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.get': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'ignore_unavailable', + 'master_timeout', + 'verbose', + 'index_details', + 'index_names', + 'include_repository', + 'sort', + 'size', + 'order', + 'after', + 'offset', + 'from_sort_value', + 'slm_policy_filter' + ] + }, + 'snapshot.get_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'snapshot.repository_analyze': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] + }, + 'snapshot.repository_verify_integrity': { + path: [ + 'name' + ], + body: [], + query: [ + 'meta_thread_pool_concurrency', + 'blob_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'index_verification_concurrency', + 'index_snapshot_verification_concurrency', + 'max_failed_shard_snapshots', + 'verify_blob_contents', + 'max_bytes_per_sec' + ] + }, + 'snapshot.restore': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.status': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'snapshot.verify_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. 
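 * For example (assuming a registered repository named `my_repository`; `name` is the only path parameter):
 *
 * ```
 * const { results } = await client.snapshot.cleanupRepository({ name: 'my_repository' })
 * ```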
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clean-up-snapshot-repo-api.html | Elasticsearch API documentation} */ - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise - async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest | TB.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.cleanup_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,20 +273,27 @@ export default class Snapshot { * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clone-snapshot-api.html | Elasticsearch API documentation} */ - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise - async clone (this: That, params: T.SnapshotCloneRequest | TB.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] - const acceptedBody: string[] = ['indices'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ?
{ ...userBody } : undefined + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.clone'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -103,9 +303,15 @@ export default class Snapshot { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -126,20 +332,27 @@ export default class Snapshot { * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/create-snapshot-api.html | Elasticsearch API documentation} */ - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise - async create (this: That, params: T.SnapshotCreateRequest | TB.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['ignore_unavailable', 'include_global_state', 'indices', 'feature_states', 'metadata', 'partial'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -149,9 +362,15 @@ export default class Snapshot { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -171,25 +390,35 @@ export default class Snapshot { * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/modules-snapshots.html | Elasticsearch API documentation} */ - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise - async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest | TB.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['repository'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -208,18 +437,31 @@ export default class Snapshot { * Delete snapshots. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-snapshot-api.html | Elasticsearch API documentation} */ - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise - async delete (this: That, params: T.SnapshotDeleteRequest | TB.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -241,18 +483,31 @@ export default class Snapshot { * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. 
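 * For example (the repository name is illustrative; per the description above, the underlying snapshot data remains in the storage location):
 *
 * ```
 * await client.snapshot.deleteRepository({ name: 'my_repository' })
 * ```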
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-snapshot-repo-api.html | Elasticsearch API documentation} */ - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise - async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest | TB.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -273,18 +528,31 @@ export default class Snapshot { * Get snapshot information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-snapshot-api.html | Elasticsearch API documentation} */ - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise - async get (this: That, params: T.SnapshotGetRequest | TB.SnapshotGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -306,19 +574,32 @@ export default class Snapshot { * Get snapshot repository information. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-snapshot-repo-api.html | Elasticsearch API documentation} */ - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise - async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest | TB.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -346,18 +627,31 @@ export default class Snapshot { * Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. 
The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. 
This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. 
In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/repo-analysis-api.html | Elasticsearch API documentation} */ - async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest | TB.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest | TB.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> - async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest | TB.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise - async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest | TB.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_analyze'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -378,18 +672,31 @@ export default class Snapshot { * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository.
This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster.
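 * A hedged sketch (names illustrative; per the description above, mark the repository `read_only` before verifying):
 *
 * ```
 * const report = await client.snapshot.repositoryVerifyIntegrity({ name: 'my_repository' })
 * ```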
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/verify-repo-integrity-api.html | Elasticsearch API documentation} */ - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise - async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest | TB.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_verify_integrity'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -410,20 +717,27 @@ export default class Snapshot { * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
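A restore sketch, continuing with the `client` from the earlier sketch. It uses only parameters from `snapshot.restore`'s accepted path and body lists in this diff (`repository`, `snapshot`, `indices`, `rename_pattern`, `rename_replacement`, `include_global_state`); all names and values are illustrative.

```ts
await client.snapshot.restore({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  indices: 'logs-*',                 // restore a subset of indices
  rename_pattern: '(.+)',            // restore under new names so live
  rename_replacement: 'restored-$1', // indices are not clobbered
  include_global_state: false        // leave cluster metadata untouched
})
```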
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/restore-snapshot-api.html | Elasticsearch API documentation} */ - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise - async restore (this: That, params: T.SnapshotRestoreRequest | TB.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.restore'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -433,9 +747,15 @@ export default class Snapshot { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -455,19 +775,32 @@ export default class Snapshot { * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. 
These requests can also tax machine resources and, when using cloud storage, incur high processing costs. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-snapshot-status-api.html | Elasticsearch API documentation} */ - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise - async status (this: That, params?: T.SnapshotStatusRequest | TB.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const querystring: Record = {} - const body = undefined + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -499,18 +832,31 @@ export default class Snapshot { * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. 
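Given the cost warning above, a status request is best scoped to a single running snapshot rather than a whole repository. A sketch with the `client` from earlier; the repository and snapshot names are illustrative, and the response field access assumes the usual `{ snapshots: [...] }` shape of this API rather than anything stated in this diff.

```ts
// Scoping to one in-progress snapshot avoids the per-shard repository
// reads incurred when listing status for many completed snapshots.
const status = await client.snapshot.status({
  repository: 'my_repository',
  snapshot: 'snapshot_in_progress'
})
console.log(status.snapshots?.[0]?.state) // e.g. 'STARTED' while running
```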
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/verify-snapshot-repo-api.html | Elasticsearch API documentation} */ - async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise - async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest | TB.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const querystring: Record = {} - const body = undefined + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['snapshot.verify_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 254cf5266..05fac7a23 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,33 +21,116 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Sql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'sql.clear_cursor': { + path: [], + body: [ + 'cursor' + ], + query: [] + }, + 'sql.delete_async': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.get_async': { + path: [ + 'id' + ], + body: [], + query: [ + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'sql.get_async_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.query': { + path: [], + body: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout' + ], + query: [ + 'format' + ] + }, + 'sql.translate': { + path: [], + body: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ], + query: [] + } + } } /** * Clear an SQL search cursor. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/clear-sql-cursor-api.html | Elasticsearch API documentation} */ - async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> - async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise - async clearCursor (this: That, params: T.SqlClearCursorRequest | TB.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['cursor'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.clear_cursor'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -71,9 +140,15 @@ export default class Sql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,18 +164,31 @@ export default class Sql { * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-async-sql-search-api.html | Elasticsearch API documentation} */ - async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise - async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest | TB.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['sql.delete_async'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -121,18 +209,31 @@ export default class Sql { * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. 
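The three async SQL endpoints above compose into a submit/poll/clean-up lifecycle. A sketch with the `client` from earlier, using only parameters from the accepted lists in this diff (`query`, `fetch_size`, `wait_for_completion_timeout`, `keep_on_completion` for `sql.query`; `id` and `keep_alive` for `sql.get_async`; `id` for `sql.delete_async`); the index name is illustrative.

```ts
const submitted = await client.sql.query({
  query: 'SELECT * FROM "my-index" ORDER BY "@timestamp" DESC',
  fetch_size: 5,
  wait_for_completion_timeout: '2s', // return an id if not finished in time
  keep_on_completion: true           // keep results retrievable by id
})

if (submitted.id != null) {
  // Poll for results; only the submitting user can retrieve them.
  const results = await client.sql.getAsync({ id: submitted.id, keep_alive: '5m' })
  if (results.is_running !== true) {
    await client.sql.deleteAsync({ id: submitted.id }) // clean up stored results
  }
}
```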
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-async-sql-search-api.html | Elasticsearch API documentation} */ - async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise - async getAsync (this: That, params: T.SqlGetAsyncRequest | TB.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise + async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -153,18 +254,31 @@ export default class Sql { * Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-async-sql-search-status-api.html | Elasticsearch API documentation} */ - async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise - async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest | TB.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -185,20 +299,27 @@ export default class Sql { * Get SQL search results. Run an SQL request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/sql-search-api.html | Elasticsearch API documentation} */ - async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> - async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise - async query (this: That, params?: T.SqlQueryRequest | TB.SqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -209,9 +330,15 @@ export default class Sql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -227,20 +354,27 @@ export default class Sql { * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. 
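A translate sketch with the `client` from earlier; `query` and `fetch_size` come from `sql.translate`'s accepted body list above, and the SQL statement is illustrative.

```ts
// Translate SQL into a Query DSL search request without running it.
const dsl = await client.sql.translate({
  query: 'SELECT name FROM "my-index" WHERE age > 30',
  fetch_size: 10
})
console.log(JSON.stringify(dsl, null, 2)) // a search API request body
```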
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/sql-translate-api.html | Elasticsearch API documentation} */ - async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise - async translate (this: That, params: T.SqlTranslateRequest | TB.SqlTranslateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.translate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -250,9 +384,15 @@ export default class Sql { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 2226d88e3..dbf757772 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,56 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Ssl { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ssl.certificates': { + path: [], + body: [], + query: [] + } + } } /** * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/security-api-ssl.html | Elasticsearch API documentation} */ - async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> - async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise - async certificates (this: That, params?: T.SslCertificatesRequest | TB.SslCertificatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise + async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['ssl.certificates'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/streams.ts b/src/api/api/streams.ts index 0762d9fa1..b823936d4 100644 --- a/src/api/api/streams.ts +++ b/src/api/api/streams.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,66 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Streams { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'streams.logs_disable': { + path: [], + body: [], + query: [] + }, + 'streams.logs_enable': { + path: [], + body: [], + query: [] + }, + 'streams.status': { + path: [], + body: [], + query: [] + } + } } /** * Disable the Logs Streams feature for this cluster * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/streams-logs-disable.html | Elasticsearch API documentation} */ - async logsDisable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async logsDisable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async logsDisable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async logsDisable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async logsDisable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['streams.logs_disable'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -77,19 +97,32 @@ export default class Streams { * Enable the Logs Streams feature for this cluster * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/streams-logs-enable.html | Elasticsearch API documentation} */ - async logsEnable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async logsEnable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async logsEnable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async logsEnable (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async logsEnable (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['streams.logs_enable'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -106,19 +139,32 @@ export default class Streams { * Return the current status of the streams feature for each streams type * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/streams-status.html | Elasticsearch API documentation} */ - async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async status (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['streams.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 193abd6e1..c196100f4 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,112 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Synonyms { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'synonyms.delete_synonym': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'synonyms.delete_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonym': { + path: [ + 'id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.get_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonyms_sets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.put_synonym': { + path: [ + 'id' + ], + body: [ + 'synonyms_set' + ], + query: [] + }, + 'synonyms.put_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [ + 'synonyms' + ], + query: [] + } + } } /** * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. 
To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-synonyms-set.html | Elasticsearch API documentation} */ - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise - async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest | TB.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -80,18 +147,31 @@ export default class Synonyms { * Delete a synonym rule. Delete a synonym rule from a synonym set. 
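A sketch of the deletion flow described above, with the `client` from earlier. The set id is illustrative; the 400 handling mirrors the documented behavior when the set is still referenced by an index analyzer, and `statusCode` is assumed to be exposed on the client's response errors.

```ts
try {
  await client.synonyms.deleteSynonym({ id: 'my-synonyms-set' })
} catch (err: any) {
  if (err?.statusCode === 400) {
    // Still in use by an analyzer: migrate the affected indices off the
    // synonyms set (for example via reindex, as above), then retry.
  } else {
    throw err
  }
}
```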
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-synonym-rule.html | Elasticsearch API documentation} */ - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise - async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest | TB.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -113,18 +193,31 @@ export default class Synonyms { * Get a synonym set. 
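A single-rule deletion sketch with the `client` from earlier; `set_id` and `rule_id` are the accepted path parameters listed above, and the values are illustrative.

```ts
// Remove one rule from a set; analyzers using the set pick up the change.
await client.synonyms.deleteSynonymRule({
  set_id: 'my-synonyms-set',
  rule_id: 'rule-1'
})
```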
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-synonyms-set.html | Elasticsearch API documentation} */ - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise - async getSynonym (this: That, params: T.SynonymsGetSynonymRequest | TB.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const querystring: Record = {} - const body = undefined + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -145,18 +238,31 @@ export default class Synonyms { * Get a synonym rule. Get a synonym rule from a synonym set. 
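A paginated read of a synonyms set with the `client` from earlier; `from` and `size` are the accepted query parameters listed above. The response fields referenced (`count`, `synonyms_set`) are assumed from the API's usual shape rather than stated in this diff.

```ts
// Page through a large synonyms set rather than fetching it all at once.
const page = await client.synonyms.getSynonym({
  id: 'my-synonyms-set',
  from: 0,
  size: 100
})
console.log(page.count, page.synonyms_set)
```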
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-synonym-rule.html | Elasticsearch API documentation} */ - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise - async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest | TB.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const querystring: Record = {} - const body = undefined + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -178,19 +284,32 @@ export default class Synonyms { * Get all synonym sets. Get a summary of all defined synonym sets. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-synonyms-set.html | Elasticsearch API documentation} */ - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise - async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest | TB.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonyms_sets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -208,20 +327,27 @@ export default class Synonyms { * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. 
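A sketch of creating or replacing a whole set with the `client` from earlier; `synonyms_set` is the accepted body parameter listed above. The rule objects (optional `id` plus a Solr-style `synonyms` string) follow the synonyms API's documented rule format, and the values are illustrative.

```ts
// Updating an existing set triggers an automatic reload of the search
// analyzers that use it, as described above.
await client.synonyms.putSynonym({
  id: 'my-synonyms-set',
  synonyms_set: [
    { id: 'rule-1', synonyms: 'hello, hi, howdy' },
    { synonyms: 'ssd => solid state drive' } // id assigned automatically
  ]
})
```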
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-synonyms-set.html | Elasticsearch API documentation} */ - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise - async putSynonym (this: That, params: T.SynonymsPutSynonymRequest | TB.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['synonyms_set'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -231,9 +357,15 @@ export default class Synonyms { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -252,20 +384,27 @@ export default class Synonyms { * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. 
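A single-rule update sketch with the `client` from earlier; `set_id` and `rule_id` are the accepted path parameters and `synonyms` the accepted body parameter listed above, with illustrative values.

```ts
// Replace one rule in place; all analyzers using the set reload
// automatically to reflect the new rule.
await client.synonyms.putSynonymRule({
  set_id: 'my-synonyms-set',
  rule_id: 'rule-1',
  synonyms: 'hello, hi, howdy, hiya'
})
```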
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-synonym-rule.html | Elasticsearch API documentation} */ - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise - async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest | TB.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const acceptedBody: string[] = ['synonyms'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -275,9 +414,15 @@ export default class Synonyms { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index e05cd4b67..87ea2e13a 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
+ * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,32 +21,86 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Tasks { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'tasks.cancel': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + }, + 'tasks.get': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + }, + 'tasks.list': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] + } + } } /** * Cancel a task. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/tasks.html | Elasticsearch API documentation} */ - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise - async cancel (this: That, params?: T.TasksCancelRequest | TB.TasksCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['tasks.cancel'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -88,18 +128,31 @@ export default class Tasks {
   * Get task information. Get information about a task currently running in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/tasks.html | Elasticsearch API documentation} */
-  async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksGetResponse>
-  async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksGetResponse, unknown>>
-  async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise<T.TasksGetResponse>
-  async get (this: That, params: T.TasksGetRequest | TB.TasksGetRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['task_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksGetResponse>
+  async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksGetResponse, unknown>>
+  async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise<T.TasksGetResponse>
+  async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['tasks.get']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -120,19 +173,32 @@ export default class Tasks {
   * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. **Identifying running tasks** The `X-Opaque-Id` header, when provided on the HTTP request header, is returned as a header in the response as well as in the `headers` field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them.
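 *
 * In this client, the same header can be attached through the per-request transport options (a sketch, not generated code; assumes a configured `client` instance and that the transport options accept a `headers` map):
 * ```
 * const tasks = await client.tasks.list(
 *   { group_by: 'parents' },
 *   { headers: { 'X-Opaque-Id': '123456' } }
 * )
 * ```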
For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/tasks.html | Elasticsearch API documentation} */ - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise - async list (this: That, params?: T.TasksListRequest | TB.TasksListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['tasks.list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index a42af2f65..94fa88afd 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,56 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + terms_enum: { + path: [ + 'index' + ], + body: [ + 'field', + 'size', + 'timeout', + 'case_insensitive', + 'index_filter', + 'string', + 'search_after' + ], + query: [] + } +} /** * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/search-terms-enum.html | Elasticsearch API documentation} */ -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise -export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise +export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.terms_enum + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +80,15 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index bc9d95194..fc3b43cec 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,74 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + termvectors: { + path: [ + 'index', + 'id' + ], + body: [ + 'doc', + 'filter', + 'per_field_analyzer', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'term_statistics', + 'routing', + 'version', + 'version_type' + ], + query: [ + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. 
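 *
 * The request shown above can be issued from this client as follows (a sketch; assumes a configured `client` instance and the example index from this description):
 * ```
 * const tv = await client.termvectors({
 *   index: 'my-index-000001',
 *   id: '1',
 *   fields: ['message'],
 *   term_statistics: true
 * })
 * ```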
**Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-termvectors.html | Elasticsearch API documentation} */ -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise -export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'id'] - const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer', 'fields', 'field_statistics', 'offsets', 'payloads', 'positions', 'term_statistics', 'routing', 'version', 'version_type'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.termvectors + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +98,15 @@ export default async function TermvectorsApi (this: That, p body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index 1e965b92f..9155d0982 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,124 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class TextStructure { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'text_structure.find_field_structure': { + path: [], + body: [], + query: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_message_structure': { + path: [], + body: [ + 'messages' + ], + query: [ + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_structure': { + path: [], + body: [ + 'text_files' + ], + query: [ + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.test_grok_pattern': { + path: [], + body: [ + 'grok_pattern', + 'text' + ], + query: [ + 'ecs_compatibility' + ] + } + } } /** * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. 
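 *
 * A usage sketch (assumes a configured `client` and an existing index with a `message` field; `index` and `field` map to the query parameters declared in `text_structure.find_field_structure` above):
 * ```
 * const structure = await client.textStructure.findFieldStructure({
 *   index: 'my-index-000001',
 *   field: 'message'
 * })
 * ```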
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/find-field-structure.html | Elasticsearch API documentation} */ - async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> - async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise - async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest | TB.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['text_structure.find_field_structure'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -77,20 +156,27 @@ export default class TextStructure { * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. 
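 *
 * A usage sketch (assumes a configured `client`; the log lines are illustrative, and `messages` is the single body field declared above):
 * ```
 * const structure = await client.textStructure.findMessageStructure({
 *   messages: [
 *     '[2024-03-05T10:52:36,256][INFO ][o.e.n.Node] starting ...',
 *     '[2024-03-05T10:52:41,038][INFO ][o.e.n.Node] started'
 *   ]
 * })
 * ```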
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/find-message-structure.html | Elasticsearch API documentation} */ - async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> - async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise - async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest | TB.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['messages'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_message_structure'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -100,9 +186,15 @@ export default class TextStructure { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -118,25 +210,35 @@ export default class TextStructure { * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: * A couple of messages from the beginning of the text. 
* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/find-structure.html | Elasticsearch API documentation} */ - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise - async findStructure (this: That, params: T.TextStructureFindStructureRequest | TB.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['text_files'] - const querystring: Record = {} - // @ts-expect-error - let body: any = params.body ?? undefined + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_structure'] + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined for (const key in params) { if (acceptedBody.includes(key)) { // @ts-expect-error body = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -152,20 +254,27 @@ export default class TextStructure { * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. 
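 *
 * A usage sketch (assumes a configured `client`; `grok_pattern` and `text` are the two body fields declared in `text_structure.test_grok_pattern` above):
 * ```
 * const result = await client.textStructure.testGrokPattern({
 *   grok_pattern: '%{WORD:first_name} %{WORD:last_name}',
 *   text: ['John Doe', 'Jane Doe']
 * })
 * // result.matches reports, per input line, whether it matched and the captured fields
 * ```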
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/test-grok-pattern.html | Elasticsearch API documentation} */ - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise - async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest | TB.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grok_pattern', 'text'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.test_grok_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -175,9 +284,15 @@ export default class TextStructure { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index c0784dcdc..24b3dfe4f 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,209 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Transform { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'transform.delete_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'delete_dest_index', + 'timeout' + ] + }, + 'transform.get_node_stats': { + path: [], + body: [], + query: [] + }, + 'transform.get_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'transform.get_transform_stats': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] + }, + 'transform.preview_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest' + ], + query: [ + 'timeout' + ] + }, + 'transform.put_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.reset_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'transform.schedule_now_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'transform.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'transform.start_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout', + 'from' + ] + }, + 'transform.stop_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] + }, + 'transform.update_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.upgrade_transforms': { + path: [], + body: [], + query: [ + 'dry_run', + 'timeout' + ] + } + } } /** * Delete a transform. 
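 *
 * A usage sketch (assumes a configured `client` and an illustrative transform ID; `force` is one of the query parameters declared in `transform.delete_transform` above):
 * ```
 * await client.transform.deleteTransform({
 *   transform_id: 'ecommerce-transform',
 *   force: false
 * })
 * ```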
 * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/delete-transform.html | Elasticsearch API documentation} */
-  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformDeleteTransformResponse>
-  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformDeleteTransformResponse, unknown>>
-  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<T.TransformDeleteTransformResponse>
-  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['transform_id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TransformDeleteTransformResponse>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TransformDeleteTransformResponse, unknown>>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<T.TransformDeleteTransformResponse>
+  async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.delete_transform']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }

     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -80,19 +244,32 @@ export default class Transform {
   * Retrieves transform usage information for transform nodes.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-transform-node-stats.html | Elasticsearch API documentation} */
-  async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
-  async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
-  async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<T.TODO>
-  async getNodeStats (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise<T.TODO>
+  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TODO, unknown>>
+  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<T.TODO>
+  async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['transform.get_node_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { querystring[key] = params[key] } } @@ -109,19 +286,32 @@ export default class Transform { * Get transforms. Get configuration information for transforms. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-transform.html | Elasticsearch API documentation} */ - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise - async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -149,18 +339,31 @@ export default class Transform { * Get transform stats. Get usage information for transforms. 
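 *
 * A usage sketch (assumes a configured `client`; `_all` is the wildcard transform ID accepted by the stats endpoint, and `allow_no_match` is one of the query parameters declared above):
 * ```
 * const stats = await client.transform.getTransformStats({
 *   transform_id: '_all',
 *   allow_no_match: true
 * })
 * ```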
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/get-transform-stats.html | Elasticsearch API documentation} */ - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise - async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -181,20 +384,27 @@ export default class Transform { * Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. 
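 *
 * A usage sketch (assumes a configured `client` and an illustrative `ecommerce` source index; the pivot groups by a customer ID and averages a price field, both assumed field names):
 * ```
 * const preview = await client.transform.previewTransform({
 *   source: { index: 'ecommerce' },
 *   pivot: {
 *     group_by: { customer_id: { terms: { field: 'customer_id' } } },
 *     aggregations: { avg_price: { avg: { field: 'taxful_total_price' } } }
 *   }
 * })
 * // preview.preview holds sample documents; preview.generated_dest_index holds the proposed mappings
 * ```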
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/preview-transform.html | Elasticsearch API documentation} */ - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> - async previewTransform (this: That, params?: T.TransformPreviewTransformRequest | TB.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.preview_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -205,9 +415,15 @@ export default class Transform { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -233,20 +449,27 @@ export default class Transform { * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. 
If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/put-transform.html | Elasticsearch API documentation} */ - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise - async putTransform (this: That, params: T.TransformPutTransformRequest | TB.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.put_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -256,9 +479,15 @@ export default class Transform { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -277,18 +506,31 @@ export default class Transform { * Reset a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/reset-transform.html | Elasticsearch API documentation} */ - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise - async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.reset_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -309,18 +551,31 @@ export default class Transform { * Schedule a transform to start now. Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at `now + frequency` unless the API is called again in the meantime. 
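Usage note for the endpoint described above, as a minimal sketch; the node URL and transform ID are hypothetical.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Process new data immediately instead of waiting for the configured
// frequency; the transform runs again at `now + frequency` afterwards.
await client.transform.scheduleNowTransform({ transform_id: 'ecommerce-transform' })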
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/schedule-now-transform.html | Elasticsearch API documentation} */ - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise - async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.schedule_now_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -338,22 +593,36 @@ export default class Transform { } /** - * Sets a cluster wide upgrade_mode setting that prepares transform indices for an upgrade. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/transform-set-upgrade-mode.html | Elasticsearch API documentation} + * Set upgrade_mode for transform indices. Sets a cluster wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, there must be no transforms running. You can close the transforms, do the upgrade, then open all the transforms again. Alternatively, you can use this API to temporarily halt tasks associated with the transforms and prevent new transforms from opening. You can also use this API during upgrades that do not require you to reindex your transform indices, though stopping transforms is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get transform info API. 
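A sketch of the upgrade workflow this endpoint enables, reusing the `client` from the earlier sketch. It assumes the request accepts an `enabled` flag, as the REST API's `?enabled=` query parameter suggests; verify against `T.TransformSetUpgradeModeRequest`.

// Halt transform tasks before restarting nodes or reindexing, then re-enable.
await client.transform.setUpgradeMode({ enabled: true })
// ... perform the upgrade or reindex ...
await client.transform.setUpgradeMode({ enabled: false })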
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode | Elasticsearch API documentation} */ - async setUpgradeMode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async setUpgradeMode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async setUpgradeMode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise - async setUpgradeMode (this: That, params?: T.TODO | TB.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.set_upgrade_mode'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -370,18 +639,31 @@ export default class Transform { * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. 
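Minimal usage sketch for the start endpoint described above, reusing the earlier `client`; the transform ID is hypothetical.

// Start a transform; if the destination index does not exist it is created
// with number_of_shards=1 and auto_expand_replicas=0-1, as described above.
await client.transform.startTransform({ transform_id: 'ecommerce-transform' })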
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/start-transform.html | Elasticsearch API documentation} */ - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise - async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.start_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -402,18 +684,31 @@ export default class Transform { * Stop transforms. Stops one or more transforms. 
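Usage sketch for stopping transforms, reusing the earlier `client`. The wildcard ID is hypothetical, and `wait_for_completion` is assumed to be one of this endpoint's standard query parameters.

// Stop one or more transforms; wait_for_completion blocks the call until
// the matched transforms are fully stopped.
await client.transform.stopTransform({
  transform_id: 'ecommerce-*',
  wait_for_completion: true
})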
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/stop-transform.html | Elasticsearch API documentation} */ - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise - async stopTransform (this: That, params: T.TransformStopTransformRequest | TB.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const querystring: Record = {} - const body = undefined + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.stop_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -434,20 +729,27 @@ export default class Transform { * Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. 
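Usage sketch for the update endpoint, reusing the earlier `client`. The field names come from this diff's accepted body list (`description`, `frequency`, etc.); the values are hypothetical.

// Update mutable transform properties. Per the description above, the new
// `description` applies immediately; other changes apply at the next checkpoint.
await client.transform.updateTransform({
  transform_id: 'ecommerce-transform',
  description: 'Hourly rollup of e-commerce orders',
  frequency: '1h'
})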
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-update-transform | Elasticsearch API documentation} */ - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise - async updateTransform (this: That, params: T.TransformUpdateTransformRequest | TB.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.update_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -457,9 +759,15 @@ export default class Transform { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -478,19 +786,32 @@ export default class Transform { * Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. 
Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation} */ - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise - async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest | TB.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const querystring: Record = {} - const body = undefined + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['transform.upgrade_transforms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 4e5490313..e745b627a 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,27 +21,71 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + update: { + path: [ + 'id', + 'index' + ], + body: [ + 'detect_noop', + 'doc', + 'doc_as_upsert', + 'script', + 'scripted_upsert', + '_source', + 'upsert' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'lang', + 'refresh', + 'require_alias', + 'retry_on_conflict', + 'routing', + 'timeout', + 'wait_for_active_shards', + '_source', + '_source_excludes', + '_source_includes' + ] + } +} /** * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-update.html | Elasticsearch API documentation} */ -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise> -export default async function UpdateApi (this: That, params: T.UpdateRequest | TB.UpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? 
{ ...userBody } : undefined +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +95,15 @@ export default async function UpdateApi = { + update_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. 
Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. 
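A sketch tying together the knobs discussed above (version-conflict handling, automatic slicing, and throttling), reusing the earlier `client`; the index name, query, and script are hypothetical.

// Count version conflicts instead of aborting, parallelize with automatic
// slicing, and throttle the batches. With the default batch size of 1000 and
// requests_per_second=500, target_time = 1000 / 500 = 2s, so each batch waits
// roughly 2s minus the time spent writing, as worked out above.
const resp = await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',       // accepted in the body (and as a query parameter)
  slices: 'auto',             // query parameter
  requests_per_second: 500,   // query parameter
  query: { range: { age: { lt: 30 } } },
  script: { source: 'ctx._source.age++', lang: 'painless' }
})
console.log(resp.updated, resp.version_conflicts)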
**Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-update-by-query.html | Elasticsearch API documentation} */ -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise -export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update_by_query + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } for (const key in params) { @@ -65,9 +110,15 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 4537e2ea8..10a22e5eb 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,25 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = { + update_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
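Usage sketch for rethrottling, reusing the earlier `client`. It assumes the usual task-based flow: `wait_for_completion: false` makes update-by-query return a task ID in the `task` field of the response; the index name and script are hypothetical.

// Launch an update-by-query as a background task...
const { task } = await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  wait_for_completion: false,
  script: { source: 'ctx._source.touched = true' }
})

// ...then change its throttling mid-flight. Speeding up takes effect
// immediately; slowing down applies after the current batch completes.
await client.updateByQueryRethrottle({
  task_id: String(task),
  requests_per_second: -1 // -1 disables throttling
})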
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/docs-update-by-query.html#docs-update-by-query-rethrottle | Elasticsearch API documentation} */ -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise -export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] - const querystring: Record = {} - const body = undefined +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.update_by_query_rethrottle + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index beee37e2b..68ddbf7a5 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,31 +21,179 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -import * as TB from '../typesWithBodyKey' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Watcher { transport: Transport + acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'watcher.ack_watch': { + path: [ + 'watch_id', + 'action_id' + ], + body: [], + query: [] + }, + 'watcher.activate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.deactivate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.delete_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.execute_watch': { + path: [ + 'id' + ], + body: [ + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch' + ], + query: [ + 'debug' + ] + }, + 'watcher.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.get_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.put_watch': { + path: [ + 'id' + ], + body: [ + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger' + ], + query: [ + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] + }, + 'watcher.query_watches': { + path: [], + body: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ], + query: [] + }, + 'watcher.start': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.stats': { + path: [ + 'metric' + ], + body: [], + query: [ + 'emit_stacktraces', + 'metric' + ] + }, + 'watcher.stop': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.update_settings': { + path: [], + body: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions.<action-id>.ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false).
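Usage sketch for acknowledging a single action, reusing the earlier `client`. The path parameters match the accepted list above; the IDs are hypothetical.

// Acknowledge one action of a watch to throttle its further executions
// until ack.state resets to awaits_successful_execution.
await client.watcher.ackWatch({
  watch_id: 'cluster_health_watch',
  action_id: 'email_admin'
})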
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-ack-watch | Elasticsearch API documentation} */ - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise - async ackWatch (this: That, params: T.WatcherAckWatchRequest | TB.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id', 'action_id'] - const querystring: Record = {} - const body = undefined + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['watcher.ack_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -88,18 +222,31 @@ export default class Watcher { * Activate a watch. A watch can be either active or inactive. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-activate-watch | Elasticsearch API documentation} */ - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise - async activateWatch (this: That, params: T.WatcherActivateWatchRequest | TB.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] - const querystring: Record = {} - const body = undefined + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['watcher.activate_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -120,18 +267,31 @@ export default class Watcher { * Deactivate a watch. A watch can be either active or inactive. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation} */ - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise - async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest | TB.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] - const querystring: Record = {} - const body = undefined + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['watcher.deactivate_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -152,18 +312,31 @@ export default class Watcher { * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. 
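A small sketch of the removal flow described above, reusing the earlier `client`; the watch ID is hypothetical. Note that `deactivateWatch` takes `watch_id` while `deleteWatch` takes `id`, per the accepted path lists in this diff.

// Optionally deactivate first, then remove the watch via the API only;
// never delete documents from the .watches index directly.
await client.watcher.deactivateWatch({ watch_id: 'cluster_health_watch' })
await client.watcher.deleteWatch({ id: 'cluster_health_watch' })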
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-delete-watch | Elasticsearch API documentation} */ - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeleteWatchResponse> - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeleteWatchResponse, unknown>> - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeleteWatchResponse> - async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest | TB.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['id'] - const querystring: Record<string, any> = {} - const body = undefined + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherDeleteWatchResponse> + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherDeleteWatchResponse, unknown>> + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherDeleteWatchResponse> + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this.acceptedParams['watcher.delete_watch'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } for (const key in params) { if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { + } else if (key !== 'body' && key !== 'querystring') { // @ts-expect-error querystring[key] = params[key] } @@ -184,20 +357,27 @@ export default class Watcher { * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
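Debugging sketch for the run-watch endpoint, reusing the earlier `client`. The body keys (`ignore_condition`, `record_execution`) come from this diff's accepted list; the watch ID is hypothetical.

// Force-run a stored watch: skip its condition and do not write a record
// to the watch history, which is useful while iterating on a watch.
const result = await client.watcher.executeWatch({
  id: 'cluster_health_watch',
  ignore_condition: true,
  record_execution: false
})
console.log(result.watch_record?.state)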
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-execute-watch | Elasticsearch API documentation} */ - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise - async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest | TB.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] - const querystring: Record = {} - // @ts-expect-error - const userBody: any = params?.body - let body: Record | string - if (typeof userBody === 'string') { - body = userBody - } else { - body = userBody != null ? { ...userBody } : undefined + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.execute_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } } params = params ?? {} @@ -208,9 +388,15 @@ export default class Watcher { body[key] = params[key] } else if (acceptedPath.includes(key)) { continue - } else if (key !== 'body') { - // @ts-expect-error - querystring[key] = params[key] + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -236,19 +422,32 @@ export default class Watcher { * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. 
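Usage sketch for the settings pair, reusing the earlier `client`. The writable dotted keys come from the `watcher.update_settings` body list above; the replica count is hypothetical.

// Read the subset of .watches index settings this API exposes...
const settings = await client.watcher.getSettings()
console.log(settings)

// ...and adjust one of the two documented writable settings.
await client.watcher.updateSettings({ 'index.number_of_replicas': 1 })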
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-settings | Elasticsearch API documentation} */
-  async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetSettingsResponse>
-  async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetSettingsResponse, unknown>>
-  async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise<T.WatcherGetSettingsResponse>
-  async getSettings (this: That, params?: T.WatcherGetSettingsRequest | TB.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetSettingsResponse>
+  async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetSettingsResponse, unknown>>
+  async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise<T.WatcherGetSettingsResponse>
+  async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.get_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -266,18 +465,31 @@ export default class Watcher {
   * Get a watch.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-watch | Elasticsearch API documentation} */
-  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetWatchResponse>
-  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetWatchResponse, unknown>>
-  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherGetWatchResponse>
-  async getWatch (this: That, params: T.WatcherGetWatchRequest | TB.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherGetWatchResponse>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherGetWatchResponse, unknown>>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherGetWatchResponse>
+  async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.get_watch']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -298,20 +510,27 @@ export default class Watcher {
   * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-put-watch | Elasticsearch API documentation} */
-  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherPutWatchResponse>
-  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherPutWatchResponse, unknown>>
-  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherPutWatchResponse>
-  async putWatch (this: That, params: T.WatcherPutWatchRequest | TB.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['id']
-    const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherPutWatchResponse>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherPutWatchResponse, unknown>>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<T.WatcherPutWatchResponse>
+  async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['watcher.put_watch']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
     }
 
     for (const key in params) {
@@ -321,9 +540,15 @@ export default class Watcher {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -342,20 +567,27 @@ export default class Watcher {
   * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-query-watches | Elasticsearch API documentation} */
-  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherQueryWatchesResponse>
-  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherQueryWatchesResponse, unknown>>
-  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<T.WatcherQueryWatchesResponse>
-  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest | TB.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherQueryWatchesResponse>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherQueryWatchesResponse, unknown>>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<T.WatcherQueryWatchesResponse>
+  async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['watcher.query_watches']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }
 
     params = params ?? {}
@@ -366,9 +598,15 @@ export default class Watcher {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
    }
@@ -384,19 +622,32 @@ export default class Watcher {
   * Start the watch service. Start the Watcher service if it is not already running.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-start | Elasticsearch API documentation} */
-  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStartResponse>
-  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStartResponse, unknown>>
-  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise<T.WatcherStartResponse>
-  async start (this: That, params?: T.WatcherStartRequest | TB.WatcherStartRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStartResponse>
+  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStartResponse, unknown>>
+  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise<T.WatcherStartResponse>
+  async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.start']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
        // @ts-expect-error
        querystring[key] = params[key]
      }
@@ -414,19 +665,32 @@ export default class Watcher {
   * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stats | Elasticsearch API documentation} */
-  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStatsResponse>
-  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStatsResponse, unknown>>
-  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise<T.WatcherStatsResponse>
-  async stats (this: That, params?: T.WatcherStatsRequest | TB.WatcherStatsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['metric']
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStatsResponse>
+  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStatsResponse, unknown>>
+  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise<T.WatcherStatsResponse>
+  async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -454,19 +718,32 @@ export default class Watcher {
   * Stop the watch service. Stop the Watcher service if it is running.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stop | Elasticsearch API documentation} */
-  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStopResponse>
-  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStopResponse, unknown>>
-  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise<T.WatcherStopResponse>
-  async stop (this: That, params?: T.WatcherStopRequest | TB.WatcherStopRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherStopResponse>
+  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherStopResponse, unknown>>
+  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise<T.WatcherStopResponse>
+  async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['watcher.stop']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -484,20 +761,27 @@ export default class Watcher {
   * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, `index.routing.allocation.include.*` and `index.routing.allocation.require.*`. Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the Watcher shards must always be in the `data_content` tier.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-update-settings | Elasticsearch API documentation} */
-  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherUpdateSettingsResponse>
-  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherUpdateSettingsResponse, unknown>>
-  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<T.WatcherUpdateSettingsResponse>
-  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest | TB.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
+  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.WatcherUpdateSettingsResponse>
+  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.WatcherUpdateSettingsResponse, unknown>>
+  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<T.WatcherUpdateSettingsResponse>
+  async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['watcher.update_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
    }
 
     params = params ?? {}
@@ -508,9 +792,15 @@ export default class Watcher {
         body[key] = params[key]
       } else if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
      }
    }
 
diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts
index 8ba845195..0ad928776 100644
--- a/src/api/api/xpack.ts
+++ b/src/api/api/xpack.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
@@ -35,32 +21,67 @@
 import { TransportResult } from '@elastic/transport'
 import * as T from '../types'
-import * as TB from '../typesWithBodyKey'
 
-interface That { transport: Transport }
+
+interface That {
+  transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
+}
 
 export default class Xpack {
   transport: Transport
+  acceptedParams: Record<string, { path: string[], body: string[], query: string[] }>
 
   constructor (transport: Transport) {
     this.transport = transport
+    this.acceptedParams = {
+      'xpack.info': {
+        path: [],
+        body: [],
+        query: [
+          'categories',
+          'accept_enterprise',
+          'human'
+        ]
+      },
+      'xpack.usage': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      }
+    }
   }
 
  /**
   * Get information. The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license.
   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.19/info-api.html | Elasticsearch API documentation} */
-  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackInfoResponse>
-  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackInfoResponse, unknown>>
-  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise<T.XpackInfoResponse>
-  async info (this: That, params?: T.XpackInfoRequest | TB.XpackInfoRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackInfoResponse>
+  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackInfoResponse, unknown>>
+  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise<T.XpackInfoResponse>
+  async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['xpack.info']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
@@ -78,19 +99,32 @@ export default class Xpack {
   * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-xpack | Elasticsearch API documentation} */
-  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackUsageResponse>
-  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackUsageResponse, unknown>>
-  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise<T.XpackUsageResponse>
-  async usage (this: That, params?: T.XpackUsageRequest | TB.XpackUsageRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const querystring: Record<string, any> = {}
-    const body = undefined
+  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.XpackUsageResponse>
+  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.XpackUsageResponse, unknown>>
+  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise<T.XpackUsageResponse>
+  async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['xpack.usage']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
 
     params = params ?? {}
     for (const key in params) {
       if (acceptedPath.includes(key)) {
         continue
-      } else if (key !== 'body') {
+      } else if (key !== 'body' && key !== 'querystring') {
         // @ts-expect-error
         querystring[key] = params[key]
       }
diff --git a/src/api/index.ts b/src/api/index.ts
index e6c74a674..90a71f688 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
  */
 
 /* eslint-disable import/export */
diff --git a/src/api/reference.md b/src/api/reference.md
new file mode 100644
index 000000000..af0596f91
--- /dev/null
+++ b/src/api/reference.md
@@ -0,0 +1,14138 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html
+comment: |
+  IMPORTANT: This file is autogenerated, DO NOT send pull requests that change this file directly.
+  You should update the script that does the generation, which can be found in:
+  https://github.com/elastic/elastic-client-generator-js
+---
+
+# API Reference [api-reference]
+
+## client.bulk [_bulk]
+Bulk index or delete documents.
+Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.
+This reduces overhead and can greatly increase indexing speed.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.
+* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.
+* To use the `delete` action, you must have the `delete` or `write` index privilege.
+* To use the `update` action, you must have the `index` or `write` index privilege.
+* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:
+
+```
+action_and_meta_data\n
+optional_source\n
+action_and_meta_data\n
+optional_source\n
+....
+action_and_meta_data\n
+optional_source\n
+```
+
+The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.
+A `create` action fails if a document with the same ID already exists in the target.
+An `index` action adds or replaces a document as necessary.
+
+NOTE: Data streams support only the `create` action.
+To update or delete a document in a data stream, you must target the backing index containing the document.
+
+An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.
+
+A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.
+
+NOTE: The final line of data must end with a newline character (`\n`).
+Each newline character may be preceded by a carriage return (`\r`).
+When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.
+Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.
+
+If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.
+
+A note on the format: the idea here is to make processing as fast as possible.
+As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.
+
+Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.
+
+There is no "correct" number of actions to perform in a single bulk request.
+Experiment with different settings to find the optimal size for your particular workload.
+Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.
+It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
+For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
+
+**Client support for bulk requests**
+
+Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:
+
+* Go: Check out `esutil.BulkIndexer`
+* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`
+* Python: Check out `elasticsearch.helpers.*`
+* JavaScript: Check out `client.helpers.*`
+* .NET: Check out `BulkAllObservable`
+* PHP: Check out bulk indexing.
+* Ruby: Check out `Elasticsearch::Helpers::BulkHelper`
+
+**Submitting bulk requests with cURL**
+
+If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.
+The latter doesn't preserve newlines. For example:
+
+```
+$ cat requests
+{ "index" : { "_index" : "test", "_id" : "1" } }
+{ "field1" : "value1" }
+$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+```
+
+**Optimistic concurrency control**
+
+Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
+The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
+
+**Versioning**
+
+Each bulk item can include the version value using the `version` field.
+It automatically follows the behavior of the index or delete operation based on the `_version` mapping.
+It also supports the `version_type`.
+
+**Routing**
+
+Each bulk item can include the routing value using the `routing` field.
+It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+
+**Wait for active shards**
+
+When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
+
+**Refresh**
+
+Control when the changes made by this request are visible to search.
+
+NOTE: Only the shards that receive the bulk request will be affected by refresh.
+Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+The request will only wait for those three shards to refresh.
+The other two shards that make up the index do not participate in the `_bulk` request at all.
+
+```ts
+client.bulk({ ... })
+```
+
+### Arguments [_arguments_bulk]
+
+#### Request (object) [_request_bulk]
+
+- **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on.
+- **`operations` (Optional, { index, create, update, delete } \| { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } \| object[])**
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message if a parsing error occurs.
+- **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create.
+- **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active.
+- **`require_alias` (Optional, boolean)**: If `true`, the request's actions must target an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
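+As a rough illustration of the NDJSON pairing described above, the `operations` array alternates action objects and source documents in the same order as the raw `_bulk` body. This is a minimal sketch; the index name, IDs, and documents are made up for the example:
+
+```ts
+// Each `index` action object is followed by its source document;
+// `delete` takes no source line, exactly as in the NDJSON format.
+const response = await client.bulk({
+  index: 'test', // default target for actions without an explicit _index
+  operations: [
+    { index: { _id: '1' } },
+    { field1: 'value1' },
+    { index: { _id: '2' } },
+    { field1: 'value2' },
+    { delete: { _id: '3' } }
+  ]
+})
+if (response.errors) {
+  // inspect response.items for the per-action results
+}
+```
+
+For larger workloads, the `client.helpers.*` utilities mentioned above can handle batching and retries for you.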
+
+## client.clearScroll [_clear_scroll]
+Clear a scrolling search.
+Clear the search context and results for a scrolling search.
+
+```ts
+client.clearScroll({ ... })
+```
+
+### Arguments [_arguments_clear_scroll]
+
+#### Request (object) [_request_clear_scroll]
+
+- **`scroll_id` (Optional, string \| string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter.
+
+## client.closePointInTime [_close_point_in_time]
+Close a point in time.
+A point in time must be opened explicitly before being used in search requests.
+The `keep_alive` parameter tells Elasticsearch how long it should persist.
+A point in time is automatically closed when the `keep_alive` period has elapsed.
+However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
+
+```ts
+client.closePointInTime({ id })
+```
+
+### Arguments [_arguments_close_point_in_time]
+
+#### Request (object) [_request_close_point_in_time]
+
+- **`id` (string)**: The ID of the point-in-time.
+
+## client.count [_count]
+Count search results.
+Get the number of documents matching a query.
+
+The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.
+The query is optional. When no query is provided, the API uses `match_all` to count all the documents.
+
+The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.
+
+The operation is broadcast across all shards.
+For each shard ID group, a replica is chosen and the search is run against it.
+This means that replicas increase the scalability of the count.
+
+```ts
+client.count({ ... })
+```
+
+### Arguments [_arguments_count]
+
+#### Request (object) [_request_count]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`q` (Optional, string)**: The query in Lucene query string syntax. This parameter cannot be used with a request body.
+
+## client.create [_create]
+Create a new document in the index.
+
+You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.
+Using `_create` guarantees that the document is indexed only if it does not already exist.
+It returns a 409 response when a document with the same ID already exists in the index.
+To update an existing document, you must use the `/<target>/_doc/` API.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
+* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
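+From the client, the `_create` guarantee described above surfaces as a thrown `ResponseError` with a 409 status code. A minimal sketch, where the index name, ID, and document are illustrative:
+
+```ts
+import { errors } from '@elastic/elasticsearch'
+
+try {
+  await client.create({
+    index: 'my-index',
+    id: '1',
+    document: { field1: 'value1' }
+  })
+} catch (err) {
+  if (err instanceof errors.ResponseError && err.statusCode === 409) {
+    // a document with ID '1' already exists in 'my-index'
+  }
+}
+```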
+
+**Automatically create data streams and indices**
+
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
+
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
+
+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
+
+If no mapping exists, the index operation creates a dynamic mapping.
+By default, new fields and objects are automatically added to the mapping if needed.
+
+Automatic index creation is controlled by the `action.auto_create_index` setting.
+If it is `true`, any index can be created automatically.
+You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
+Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+When a list is specified, the default behavior is to disallow.
+
+NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
+It does not affect the creation of data streams.
+
+**Routing**
+
+By default, shard placement (or routing) is controlled by using a hash of the document's ID value.
+For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
+
+When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
+This does come at the (very minimal) cost of an additional document parsing pass.
+If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+
+**Distributed**
+
+The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
+After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
+
+**Active shards**
+
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
+
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
+
+For example, suppose you have a cluster of three nodes, A, B, and C and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
+
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+
+```ts
+client.create({ id, index })
+```
+
+### Arguments [_arguments_create]
+
+#### Request (object) [_request_create]
+
+- **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
+- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+- **`document` (Optional, object)**: A document.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message if a parsing error occurs.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
+- **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+
+## client.delete [_delete]
+Delete a document.
+
+Remove a JSON document from the specified index.
+
+NOTE: You cannot send deletion requests directly to a data stream.
+To delete a document in a data stream, you must target the backing index containing the document.
+
+**Optimistic concurrency control**
+
+Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.
+If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.
+
+**Versioning**
+
+Each document indexed is versioned.
+When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.
+Every write operation run on a document, deletes included, causes its version to be incremented.
+The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.
+The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting.
+
+**Routing**
+
+If routing is used during indexing, the routing value also needs to be specified to delete a document.
+
+If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request.
+
+For example:
+
+```
+DELETE /my-index-000001/_doc/1?routing=shard-1
+```
+
+This request deletes the document with ID 1, but it is routed based on the user.
+The document is not deleted if the correct routing is not specified.
+
+**Distributed**
+
+The delete operation gets hashed into a specific shard ID.
+It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.
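+In the JavaScript client, the routed deletion above corresponds to passing `routing` alongside `index` and `id`. This sketch simply mirrors the REST example; the values come from that example:
+
+```ts
+// Equivalent of: DELETE /my-index-000001/_doc/1?routing=shard-1
+const response = await client.delete({
+  index: 'my-index-000001',
+  id: '1',
+  routing: 'shard-1'
+})
+console.log(response.result) // 'deleted' if the routed lookup found the document
+```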
+
+```ts
+client.delete({ id, index })
+```
+
+### Arguments [_arguments_delete]
+
+#### Request (object) [_request_delete]
+
+- **`id` (string)**: A unique identifier for the document.
+- **`index` (string)**: The name of the target index.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error.
+- **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+
+## client.deleteByQuery [_delete_by_query]
+Delete documents.
+
+Deletes documents that match the specified query.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
+
+* `read`
+* `delete` or `write`
+
+You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.
+If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.
+
+NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.
+
+While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.
+A bulk delete request is performed for each batch of matching documents.
+If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential backoff.
+If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+Any delete requests that completed successfully still stick; they are not rolled back.
+ +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. + +**Throttling delete requests** + +To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. +This pads each batch with a wait time to throttle the rate. +Set `requests_per_second` to `-1` to disable throttling. + +Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default, the batch size is `1000`, so if `requests_per_second` is set to `500`: + +``` +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +``` + +Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Delete by query supports sliced scroll to parallelize the delete process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. + +Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. +Adding slices to the delete by query operation creates sub-requests, which means it has some quirks: + +* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with slices only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-requests proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance.
Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. +* Delete performance scales linearly across available resources with the number of slices. + +Whether query or delete performance dominates the runtime depends on the documents being deleted and cluster resources. + +**Cancel a delete by query operation** + +Any delete by query can be canceled using the task cancel API. For example: + +``` +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +``` + +The task ID can be found by using the get tasks API. + +Cancellation should happen quickly but might take a few seconds. +The get task status API will continue to list the delete by query task until this task checks that it has been canceled and terminates itself. +```ts +client.deleteByQuery({ index }) +``` +### Arguments [_arguments_delete_by_query] + +#### Request (object) [_request_delete_by_query] + +- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to delete. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A sort object that specifies the order of deleted documents. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string.
This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`from` (Optional, number)**: Skips the specified number of documents. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. +- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`q` (Optional, string)**: A query in the Lucene query string syntax. +- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. +- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. +- **`search_timeout` (Optional, string \| -1 \| 0)**: The explicit timeout for each search request. It defaults to no timeout. +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. +- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. +- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +- **`timeout` (Optional, string \| -1 \| 0)**: The period each deletion request waits for active shards. +- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
The `timeout` value controls how long each write request waits for unavailable shards to become available. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. + +## client.deleteByQueryRethrottle [_delete_by_query_rethrottle] +Throttle a delete by query operation. + +Change the number of requests per second for a particular delete by query operation. +Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. +```ts +client.deleteByQueryRethrottle({ task_id }) +``` +### Arguments [_arguments_delete_by_query_rethrottle] + +#### Request (object) [_request_delete_by_query_rethrottle] + +- **`task_id` (string \| number)**: The ID for the task. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. + +## client.deleteScript [_delete_script] +Delete a script or search template. +Deletes a stored script or search template. +```ts +client.deleteScript({ id }) +``` +### Arguments [_arguments_delete_script] + +#### Request (object) [_request_delete_script] + +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. + +## client.exists [_exists] +Check a document. + +Verify that a document exists. +For example, check to see if a document with the `_id` 0 exists: + +``` +HEAD my-index-000001/_doc/0 +``` + +If the document exists, the API returns a status code of `200 - OK`. +If the document doesn’t exist, the API returns `404 - Not Found`. + +**Versioning support** + +You can use the `version` parameter to check the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. +```ts +client.exists({ id, index }) +``` +### Arguments [_arguments_exists] + +#### Request (object) [_request_exists] + +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible.
If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +- **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. + +## client.existsSource [_exists_source] +Check for a document source. + +Check whether a document source exists in an index. +For example: + +``` +HEAD my-index-000001/_source/1 +``` + +A document's source is not available if it is disabled in the mapping. +```ts +client.existsSource({ id, index }) +``` +### Arguments [_arguments_exists_source] + +#### Request (object) [_request_exists_source] + +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. 
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. + +## client.explain [_explain] +Explain a document match result. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. +```ts +client.explain({ id, index }) +``` +### Arguments [_arguments_explain] + +#### Request (object) [_request_explain] + +- **`id` (string)**: The document identifier. +- **`index` (string)**: Index names that are used to limit the request. Only a single index name can be provided to this parameter. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: `true` or `false` to return the `_source` field or not, or a list of fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response.
If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return in the response. +- **`q` (Optional, string)**: The query in the Lucene query string syntax. + +## client.fieldCaps [_field_caps] +Get the field capabilities. + +Get information about the capabilities of fields among multiple indices. + +For data streams, the API returns field capabilities among the stream’s backing indices. +It returns runtime fields like any other field. +For example, a runtime field with a type of `keyword` is returned the same as any other field that belongs to the `keyword` family. +```ts +client.fieldCaps({ ... }) +``` +### Arguments [_arguments_field_caps] + +#### Request (object) [_request_field_caps] + +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`fields` (Optional, string \| string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. +- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis; it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance, a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none`, so this API may return an index even if the provided filter matches no document. +- **`runtime_mappings` (Optional, Record)**: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match.
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`include_unmapped` (Optional, boolean)**: If `true`, unmapped fields are included in the response. +- **`filters` (Optional, string)**: A list of filters to apply to the response. +- **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. +- **`include_empty_fields` (Optional, boolean)**: If `false`, empty fields are not included in the response. + +## client.get [_get] +Get a document by its ID. + +Get a document and its source or stored fields from an index. + +By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). +In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. +To turn off realtime behavior, set the `realtime` parameter to `false`. + +**Source filtering** + +By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. +You can turn off `_source` retrieval by using the `_source` parameter: + +``` +GET my-index-000001/_doc/0?_source=false +``` + +If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. +This can be helpful with large documents where partial retrieval can save on network overhead. +Both parameters take a comma-separated list of fields or wildcard expressions. +For example: + +``` +GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +``` + +If you only want to specify includes, you can use a shorter notation: + +``` +GET my-index-000001/_doc/0?_source=*.id +``` + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to retrieve a document. +For example: + +``` +GET my-index-000001/_doc/2?routing=user1 +``` + +This request gets the document with ID 2, but it is routed based on the user. +The document is not fetched if the correct routing is not specified. + +**Distributed** + +The GET operation is hashed into a specific shard ID. +It is then redirected to one of the replicas within that shard ID and returns the result. +The replicas are the primary shard and its replicas within that shard ID group. +This means that the more replicas you have, the better your GET scaling will be. + +**Versioning support** + +You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. +```ts +client.get({ id, index }) +``` +### Arguments [_arguments_get] + +#### Request (object) [_request_get] + +- **`id` (string)**: A unique document identifier.
+- **`index` (string)**: The name of the index that contains the document. +- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. + +## client.getScript [_get_script] +Get a script or search template. +Retrieves a stored script or search template. +```ts +client.getScript({ id }) +``` +### Arguments [_arguments_get_script] + +#### Request (object) [_request_get_script] + +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
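+ +For example, retrieving a stored search template by its ID might look like the following minimal sketch (the `my-search-template` ID is illustrative): + +```ts +import { Client } from '@elastic/elasticsearch' + +const client = new Client({ node: 'http://localhost:9200' }) + +// Fetch a stored script or search template by its ID. +const response = await client.getScript({ id: 'my-search-template' }) +if (response.found) { +  console.log(response.script?.source) +} +```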
+ +## client.getScriptContext [_get_script_context] +Get script contexts. + +Get a list of supported script contexts and their methods. +```ts +client.getScriptContext() +``` + +## client.getScriptLanguages [_get_script_languages] +Get script languages. + +Get a list of available script types, languages, and contexts. +```ts +client.getScriptLanguages() +``` + +## client.getSource [_get_source] +Get a document's source. + +Get the source of a document. +For example: + +``` +GET my-index-000001/_source/1 +``` + +You can use the source filtering parameters to control which parts of the `_source` are returned: + +``` +GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +``` +```ts +client.getSource({ id, index }) +``` +### Arguments [_arguments_get_source] + +#### Request (object) [_request_get_source] + +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: The name of the index that contains the document. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. + +## client.healthReport [_health_report] +Get the cluster health. +Get a report with the health status of an Elasticsearch cluster. +The report contains a list of indicators that compose Elasticsearch functionality. + +Each indicator has a health status of: green, unknown, yellow or red. +The indicator will provide an explanation and metadata describing the reason for its current health status. + +The cluster’s status is controlled by the worst indicator status. + +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. +Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. + +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. 
+A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. + +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set `verbose` to `false` to disable the more expensive analysis logic. +```ts +client.healthReport({ ... }) +``` +### Arguments [_arguments_health_report] + +#### Request (object) [_request_health_report] + +- **`feature` (Optional, string \| string[])**: A feature of the cluster, as returned by the top-level health report API. +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout. +- **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. +- **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. + +## client.index [_index] +Create or update a document in an index. + +Add a JSON document to the specified data stream or index and make it searchable. +If the target is an index and the document already exists, the request updates the document and increments its version. + +NOTE: You cannot use this API to send update requests for existing documents in a data stream. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. +* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. +* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +NOTE: Replica shards might not all be started when an indexing operation returns successfully. +By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. + +**Automatically create data streams and indices** + +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. + +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. + +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. + +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. + +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+When a list is specified, the default behavior is to disallow. + +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. + +**Optimistic concurrency control** + +Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Routing** + +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Distributed** + +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. + +**Active shards** + +To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. +If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. +By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). +This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. +To alter this behavior per operation, use the `wait_for_active_shards` request parameter. + +Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas+1`). +Specifying a negative value or a number greater than the number of shard copies will throw an error. + +For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). +If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. +This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. +If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. +This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. +The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. + +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. + +**No operation (noop) updates** + +When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. +If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. +The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. + +There isn't a definitive rule for when noop updates aren't acceptable. +It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. + +**Versioning** + +Each indexed document is given a version number. +By default, internal versioning is used that starts at 1 and increments with each update, deletes included. +Optionally, the version number can be set to an external value (for example, if maintained in a database). +To enable this functionality, `version_type` should be set to `external`. +The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. + +NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. +If no version is provided, the operation runs without any version checks. + +When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. +If true, the document will be indexed and the new version number used. +If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: + +``` +PUT my-index-000001/_doc/1?version=2&version_type=external +{ + "user": { + "id": "elkbee" + } +} +``` + +In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. +If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). + +A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. +Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
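+ +Translated to this client, the external versioning example above might look like the following minimal sketch (the index name, document ID, and version are illustrative): + +```ts +import { Client } from '@elastic/elasticsearch' + +const client = new Client({ node: 'http://localhost:9200' }) + +// Index a document with an externally maintained version number. The +// operation succeeds only while the supplied version is higher than the +// version of the stored document; otherwise it fails with a 409 conflict. +await client.index({ +  index: 'my-index-000001', +  id: '1', +  version: 2, +  version_type: 'external', +  document: { +    user: { id: 'elkbee' } +  } +}) +```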
+```ts +client.index({ index }) +``` +### Arguments [_arguments_index] + +#### Request (object) [_request_index] + +- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. +- **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter. +- **`document` (Optional, object)**: A document. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message if there is a parsing error. +- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. +- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
The default value of `1` means it waits for each primary shard to be active. +- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. +- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). + +## client.info [_info] +Get cluster info. +Get basic build, version, and cluster information. +NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored. +```ts +client.info() +``` + +## client.knnSearch [_knn_search] +Run a knn search. + +NOTE: The kNN search API has been replaced by the `knn` option in the search API. + +Perform a k-nearest neighbor (kNN) search on a `dense_vector` field and return the matching documents. +Given a query vector, the API finds the k closest vectors and returns those documents as search hits. + +Elasticsearch uses the HNSW algorithm to support efficient kNN search. +Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. +This means the results returned are not always the true k closest neighbors. + +The kNN search API supports restricting the search using a filter. +The search will return the top k documents that also match the filter query. + +A kNN search response has the exact same structure as a search API response. +However, certain sections have a meaning specific to kNN search: + +* The document `_score` is determined by the similarity between the query and document vector. +* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. +```ts +client.knnSearch({ index, knn }) +``` +### Arguments [_arguments_knn_search] + +#### Request (object) [_request_knn_search] + +- **`index` (string \| string[])**: A list of index names to search; use `_all` or an empty string to perform the operation on all indices. +- **`knn` ({ field, query_vector, k, num_candidates })**: The kNN query to run. +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +- **`fields` (Optional, string \| string[])**: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns.
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } \| { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +- **`routing` (Optional, string)**: A list of specific routing values. + +## client.mget [_mget] +Get multiple documents. + +Get multiple JSON documents by ID from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. +To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. + +**Filter source fields** + +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_includes` or `_source_excludes` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. + +**Get stored fields** + +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. +```ts +client.mget({ ... }) +``` +### Arguments [_arguments_mget] + +#### Request (object) [_request_mget] + +- **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. +- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI. +- **`ids` (Optional, string \| string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
+- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +- **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents. +- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: `true` or `false` to return the `_source` field or not, or a list of fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. These fields are retrieved from the index rather than the document `_source`. + +## client.msearch [_msearch] +Run multiple searches. + +The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. +The structure is as follows: + +``` +header\n +body\n +header\n +body\n +``` + +This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. + +IMPORTANT: The final line of data must end with a newline character `\n`. +Each newline character may be preceded by a carriage return `\r`. +When sending requests to this endpoint, the `Content-Type` header should be set to `application/x-ndjson`. +```ts +client.msearch({ ... }) +``` +### Arguments [_arguments_msearch] + +#### Request (object) [_request_msearch] + +- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases to search. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`include_named_queries_score` (Optional, boolean)**: Indicates whether `hit.matched_queries` should be rendered as a map that includes the name of the matched query associated with its score (`true`) or as an array containing the name of the matched queries (`false`). This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
+- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute.
+- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
+- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (for example, if date filters are mandatory to match but the shard bounds and the query are disjoint).
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is returned as an integer in the response. Defaults to `false`, which returns an object.
+- **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
+- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
+
+## client.msearchTemplate [_msearch_template]
+Run multiple templated searches.
+
+Run multiple templated searches with a single request.
+If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.
+For example:
+
+```
+$ cat requests
+{ "index": "my-index" }
+{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+{ "index": "my-other-index" }
+{ "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+
+$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+```
+```ts
+client.msearchTemplate({ ... })
+```
+### Arguments [_arguments_msearch_template]
+
+#### Request (object) [_request_msearch_template]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search.
+It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`.
+- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests.
+- **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object.
+- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.
+
+## client.mtermvectors [_mtermvectors]
+Get multiple term vectors.
+
+Get multiple term vectors with a single request.
+You can specify existing documents by index and ID or provide artificial documents in the body of the request.
+You can specify the index in the request body or request URI.
+The response contains a `docs` array with all the fetched termvectors.
+Each element has the structure provided by the termvectors API.
+
+**Artificial documents**
+
+You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.
+The mapping used is determined by the specified `_index`.
+```ts
+client.mtermvectors({ ... })
+```
+### Arguments [_arguments_mtermvectors]
+
+#### Request (object) [_request_mtermvectors]
+
+- **`index` (Optional, string)**: The name of the index that contains the documents.
+- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: An array of existing or artificial documents.
+- **`ids` (Optional, string[])**: A simplified syntax to specify documents by their ID if they're in the same index.
+- **`fields` (Optional, string \| string[])**: A list of fields or wildcard expressions to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+- **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
+- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
+- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+- **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`term_statistics` (Optional, boolean)**: If `true`, the response includes term frequency and document frequency.
+- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type.
+
+## client.openPointInTime [_open_point_in_time]
+Open a point in time.
+
+A search request by default runs against the most recent visible data of the target indices,
+which is called a point in time. An Elasticsearch PIT (point in time) is a lightweight view into the
+state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple
+search requests using the same point in time. For example, if refreshes happen between
+`search_after` requests, then the results of those requests might not be consistent as changes happening
+between searches are only visible to the more recent point in time.
+
+A point in time must be opened explicitly before being used in search requests.
+
+A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.
+
+Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.
+If you want to retrieve more hits, use PIT with `search_after`.
+
+IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
+
+When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception.
+To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.
+
+**Keeping point in time alive**
+
+The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
+The value does not need to be long enough to process all data; it just needs to be long enough for the next request.
+
+Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+Once the smaller segments are no longer needed they are deleted.
+However, open point-in-times prevent the old segments from being deleted since they are still in use.
+
+TIP: Keeping older segments alive means that more disk space and file handles are needed.
+Ensure that you have configured your nodes to have ample free file handles.
+
+Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
+Note that a point-in-time doesn't prevent its associated indices from being deleted.
+You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
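+
+To make the lifecycle concrete, here is a minimal sketch of opening, searching, and closing a PIT with this client. The `client` setup, the index name `my-index`, and the one-minute `keep_alive` are illustrative assumptions, not part of the generated reference:
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' })
+
+// Open a point in time against a hypothetical index
+const pit = await client.openPointInTime({
+  index: 'my-index',
+  keep_alive: '1m'
+})
+
+// Search the PIT; note that no index is specified on the search itself,
+// because the PIT already carries it
+const result = await client.search({
+  pit: { id: pit.id, keep_alive: '1m' },
+  query: { match_all: {} }
+})
+
+// Close the PIT once it is no longer needed
+await client.closePointInTime({ id: pit.id })
+```
+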
+```ts +client.openPointInTime({ index, keep_alive }) +``` +### Arguments [_arguments_open_point_in_time] + +#### Request (object) [_request_open_point_in_time] + +- **`index` (string \| string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices +- **`keep_alive` (string \| -1 \| 0)**: Extend the length of time that the point in time persists. +- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. + +## client.ping [_ping] +Ping the cluster. +Get information about whether the cluster is running. +```ts +client.ping() +``` + +## client.putScript [_put_script] +Create or update a script or search template. +Creates or updates a stored script or search template. +```ts +client.putScript({ id, script }) +``` +### Arguments [_arguments_put_script] + +#### Request (object) [_request_put_script] + +- **`id` (string)**: The identifier for the stored script or search template. It must be unique within the cluster. +- **`script` ({ lang, options, source })**: The script or search template, its parameters, and its language. +- **`context` (Optional, string)**: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+It can also be set to `-1` to indicate that the request should never time out.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
+
+## client.rankEval [_rank_eval]
+Evaluate ranked search results.
+
+Evaluate the quality of ranked search results over a set of typical search queries.
+```ts
+client.rankEval({ requests })
+```
+### Arguments [_arguments_rank_eval]
+
+#### Request (object) [_request_rank_eval]
+
+- **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings.
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.
+- **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`search_type` (Optional, string)**: The search operation type.
+
+## client.reindex [_reindex]
+Reindex documents.
+
+Copy documents from a source to a destination.
+You can copy all documents to the destination index or reindex a subset of the documents.
+The source can be any existing index, alias, or data stream.
+The destination must differ from the source.
+For example, you cannot reindex a data stream into itself.
+
+IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.
+The destination should be configured as desired before calling the reindex API.
+Reindex does not copy the settings from the source or its associated template.
+Mappings, shard counts, and replicas, for example, must be configured ahead of time.
+
+If the Elasticsearch security features are enabled, you must have the following security privileges:
+
+* The `read` index privilege for the source data stream, index, or alias.
+* The `write` index privilege for the destination data stream, index, or index alias.
+* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.
+* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.
+
+If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.
+Automatic data stream creation requires a matching index template with data stream enabled.
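+
+Before the configuration details below, a hedged sketch of a typical asynchronous reindex call with this client may help. The index names are hypothetical, and the same `client` setup as in the earlier point-in-time example is assumed:
+
+```ts
+// Copy documents from a hypothetical source index to a new destination index.
+// With `wait_for_completion: false`, Elasticsearch launches the reindex as a
+// task and the response carries a task ID instead of the final results.
+const response = await client.reindex({
+  source: { index: 'my-index' },
+  dest: { index: 'my-new-index' },
+  conflicts: 'proceed',
+  wait_for_completion: false
+})
+
+console.log(response.task) // usable with the tasks API or reindexRethrottle
+```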
+ +The `dest` element can be configured like the index API to control optimistic concurrency control. +Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. + +Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. + +Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. +All existing documents will cause a version conflict. + +IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. +A reindex can only add new documents to a destination data stream. +It cannot update existing documents in a destination data stream. + +By default, version conflicts abort the reindex process. +To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `conflicts` property. +Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. + +NOTE: The reindex API makes no effort to handle ID collisions. +The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. +Instead, make sure that IDs are unique by using a script. + +**Running reindex asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `_tasks/`. + +**Reindex from multiple sources** + +If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. +That way you can resume the process if there are any errors by removing the partially completed source and starting over. +It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. + +For example, you can use a bash script like this: + +``` +for index in i1 i2 i3 i4 i5; do + curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ + "source": { + "index": "'$index'" + }, + "dest": { + "index": "'$index'-reindexed" + } + }' +done +``` + +**Throttling** + +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. +Requests are throttled by padding each batch with a wait time. +To turn off throttling, set `requests_per_second` to `-1`. + +The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. 
+By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +``` +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +``` + +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Reindex supports sliced scroll to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. +You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. +The `slices` parameter specifies the number of slices to use. + +Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: + +* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. +* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. + +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. +If slicing manually or otherwise tuning automatic slicing, use the following guidelines. + +Query performance is most efficient when the number of slices is equal to the number of shards in the index. +If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. +Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. + +Indexing performance scales linearly across available resources with the number of slices. + +Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Modify documents during reindexing** + +Like `_update_by_query`, reindex operations support a script that modifies the document. +Unlike `_update_by_query`, the script is allowed to modify the document's metadata. + +Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. +For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. 
This "no operation" will be reported in the `noop` counter in the response body. +Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. +The deletion will be reported in the `deleted` counter in the response body. +Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. + +Think of the possibilities! Just be careful; you are able to change: + +* `_id` +* `_index` +* `_version` +* `_routing` + +Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. +It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. + +**Reindex from remote** + +Reindex supports reindexing from a remote Elasticsearch cluster. +The `host` parameter must contain a scheme, host, port, and optional path. +The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. +Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. +There are a range of settings available to configure the behavior of the HTTPS connection. + +When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. +Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. +It can be set to a comma delimited list of allowed remote host and port combinations. +Scheme is ignored; only the host and port are used. +For example: + +``` +reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] +``` + +The list of allowed hosts must be configured on any nodes that will coordinate the reindex. +This feature should work with remote clusters of any version of Elasticsearch. +This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. + +WARNING: Elasticsearch does not support forward compatibility across major versions. +For example, you cannot reindex from a 7.x cluster into a 6.x cluster. + +To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. + +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + +Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. +If the remote index includes very large documents you'll need to use a smaller batch size. +It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. +Both default to 30 seconds. + +**Configuring SSL parameters** + +Reindex from remote supports configurable SSL settings. +These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. +It is not possible to configure SSL in the body of the reindex request. +```ts +client.reindex({ dest, source }) +``` +### Arguments [_arguments_reindex] + +#### Request (object) [_request_reindex] + +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from. 
+- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: Indicates whether to continue reindexing even when there are conflicts.
+- **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing.
+- **`size` (Optional, number)**
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+
+## client.reindexRethrottle [_reindex_rethrottle]
+Throttle a reindex operation.
+
+Change the number of requests per second for a particular reindex operation.
+For example:
+
+```
+POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+```
+
+Rethrottling that speeds up the query takes effect immediately.
+Rethrottling that slows down the query will take effect after completing the current batch.
+This behavior prevents scroll timeouts.
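+
+For example, a sketch of the equivalent client call, removing the throttle from the task shown above (the task ID is taken from that example and the same `client` instance is assumed):
+
+```ts
+// Remove the throttle from an in-flight reindex task
+await client.reindexRethrottle({
+  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
+  requests_per_second: -1
+})
+```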
+```ts
+client.reindexRethrottle({ task_id })
+```
+### Arguments [_arguments_reindex_rethrottle]
+
+#### Request (object) [_request_reindex_rethrottle]
+
+- **`task_id` (string)**: The task identifier, which can be found by using the tasks API.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level.
+
+## client.renderSearchTemplate [_render_search_template]
+Render a search template.
+
+Render a search template as a search request body.
+```ts
+client.renderSearchTemplate({ ... })
+```
+### Arguments [_arguments_render_search_template]
+
+#### Request (object) [_request_render_search_template]
+
+- **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.
+- **`file` (Optional, string)**
+- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+- **`source` (Optional, string)**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` is specified, this parameter is required.
+
+## client.scriptsPainlessExecute [_scripts_painless_execute]
+Run a script.
+
+Runs a script and returns a result.
+Use this API to build and test scripts, such as when defining a script for a runtime field.
+This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.
+
+The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is.
+
+Each context requires a script, but additional parameters depend on the context you're using for that script.
+```ts
+client.scriptsPainlessExecute({ ... })
+```
+### Arguments [_arguments_scripts_painless_execute]
+
+#### Request (object) [_request_scripts_painless_execute]
+
+- **`context` (Optional, Enum("painless_test" \| "filter" \| "score" \| "boolean_field" \| "date_field" \| "double_field" \| "geo_point_field" \| "ip_field" \| "keyword_field" \| "long_field" \| "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed.
+- **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
+- **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run.
+
+## client.scroll [_scroll]
+Run a scrolling search.
+
+IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).
+
+The scroll API gets large sets of results from a single scrolling search request.
+To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.
+The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.
+The search response returns a scroll ID in the `_scroll_id` response body parameter.
+You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.
+If the Elasticsearch security features are enabled, access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.
+
+You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.
+
+IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
+```ts
+client.scroll({ scroll_id })
+```
+### Arguments [_arguments_scroll]
+
+#### Request (object) [_request_scroll]
+
+- **`scroll_id` (string)**: The scroll ID of the search.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the API response’s `hits.total` property is returned as an integer. If `false`, the API response’s `hits.total` property is returned as an object.
+
+## client.search [_search]
+Run a search.
+
+Get search hits that match the query defined in the request.
+You can provide search queries using the `q` query string parameter or the request body.
+If both are specified, only the query parameter is used.
+
+If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
+To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.
+
+**Search slicing**
+
+When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.
+By default the splitting is done first on the shards, then locally on each shard.
+The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
+
+For instance, if the number of shards is equal to 2 and you request 4 slices, then the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
+
+IMPORTANT: The same point-in-time ID should be used for all slices.
+If different PIT IDs are used, slices can overlap and miss documents.
+This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
+```ts
+client.search({ ... })
+```
+### Arguments [_arguments_search]
+
+#### Request (object) [_request_search]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
+- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results by the values of the specified field.
+- **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit.
+- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
+- **`from` (Optional, number)**: The starting document offset, which must be non-negative.
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. +- **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. +- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +- **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. +- **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. +- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. +- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. +- **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL.
+- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
+- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`.
+- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: Used to retrieve the next page of hits using a set of sort values from the previous page.
+- **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
+- **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A list of `<field>:<direction>` pairs.
+- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`.
+- **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response.
+- **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early.
+- **`timeout` (Optional, string)**: The period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.
+- **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting.
+- **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit.
+- **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit.
+- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices will be ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.
+- **`min_compatible_shard_node` (Optional, string)**: The minimum version of the node that can handle the request. Any handling node with a lower version will fail the request.
+- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node; * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method; * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method; * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs, or if not, select shards using the default method; * `_shards:<shard>,<shard>` to run the search only on the specified shards; * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
+- **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
+- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index-level settings.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring.
+- **`suggest_field` (Optional, string)**: The field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
+- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
+- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic `_source`? Use this to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+
+## client.searchMvt [_search_mvt]
+Search a vector tile.
+
+Search a vector tile for geospatial values.
+Before using this API, you should be familiar with the Mapbox vector tile specification.
+The API returns results as a binary Mapbox vector tile.
+
+Internally, Elasticsearch translates a vector tile search API request into a search containing:
+
+* A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box.
+* A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type.
+The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box.
+* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
+* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
+
+For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search:
+
+```
+GET my-index/_search
+{
+  "size": 10000,
+  "query": {
+    "geo_bounding_box": {
+      "my-geo-field": {
+        "top_left": {
+          "lat": -40.979898069620134,
+          "lon": -45
+        },
+        "bottom_right": {
+          "lat": -66.51326044311186,
+          "lon": 0
+        }
+      }
+    }
+  },
+  "aggregations": {
+    "grid": {
+      "geotile_grid": {
+        "field": "my-geo-field",
+        "precision": 11,
+        "size": 65536,
+        "bounds": {
+          "top_left": {
+            "lat": -40.979898069620134,
+            "lon": -45
+          },
+          "bottom_right": {
+            "lat": -66.51326044311186,
+            "lon": 0
+          }
+        }
+      }
+    },
+    "bounds": {
+      "geo_bounds": {
+        "field": "my-geo-field",
+        "wrap_longitude": false
+      }
+    }
+  }
+}
+```
+
+The API returns results as a binary Mapbox vector tile.
+Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
+
+* A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query.
+* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
+* A meta layer containing:
+  * A feature containing a bounding box. By default, this is the bounding box of the tile.
+  * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
+  * Metadata for the search.
+
+The API only returns features that can display at its zoom level.
+For example, if a polygon feature has no area at its zoom level, the API omits it.
+The API returns errors as UTF-8 encoded JSON.
+
+IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.
+If you specify both parameters, the query parameter takes precedence.
+
+**Grid precision for geotile**
+
+For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.
+`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`.
+For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.
+The maximum final precision is 29.
+The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
+For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+The `aggs` layer only contains features for cells with matching data.
+
+**Grid precision for geohex**
+
+For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`.
+
+This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
+The following table maps the H3 resolution for each precision.
+For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+At a precision of 6, hexagonal cells have an H3 resolution of 2.
+If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+At a precision of 7, hexagonal cells have an H3 resolution of 3.
+
+| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+| --------- | ---------------- | ------------- | --------------- | ----- |
+| 1 | 4 | 0 | 122 | 30.5 |
+| 2 | 16 | 0 | 122 | 7.625 |
+| 3 | 64 | 1 | 842 | 13.15625 |
+| 4 | 256 | 1 | 842 | 3.2890625 |
+| 5 | 1024 | 2 | 5882 | 5.744140625 |
+| 6 | 4096 | 2 | 5882 | 1.436035156 |
+| 7 | 16384 | 3 | 41162 | 2.512329102 |
+| 8 | 65536 | 3 | 41162 | 0.6280822754 |
+| 9 | 262144 | 4 | 288122 | 1.099098206 |
+| 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+| 25 | 1125899906842624 | 15 | 569707381193162 | 0.5060018015 |
+| 26 | 4503599627370496 | 15 | 569707381193162 | 0.1265004504 |
+| 27 | 18014398509481984 | 15 | 569707381193162 | 0.03162511259 |
+| 28 | 72057594037927936 | 15 | 569707381193162 | 0.007906278149 |
+| 29 | 288230376151711744 | 15 | 569707381193162 | 0.001976569537 |
+
+Hexagonal cells don't align perfectly on a vector tile.
+Some cells may intersect more than one vector tile.
+To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
+Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.
+```ts
+client.searchMvt({ index, field, zoom, x, y })
+```
+### Arguments [_arguments_search_mvt]
+
+#### Request (object) [_request_search_mvt]
+
+- **`index` (string \| string[])**: List of data streams, indices, or aliases to search
+- **`field` (string)**: Field containing geospatial data to return
+- **`zoom` (number)**: Zoom level for the vector tile to search
+- **`x` (number)**: X coordinate for the vector tile to search
+- **`y` (number)**: Y coordinate for the vector tile to search
+- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations.
+- **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
+- **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile.
+- **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
+- **`fields` (Optional, string \| string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+- **`grid_agg` (Optional, Enum("geotile" \| "geohex"))**: The aggregation used to create a grid for the `field`.
+- **`grid_precision` (Optional, number)**: Additional zoom levels available through the `aggs` layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the `aggs` layer.
+- **`grid_type` (Optional, Enum("grid" \| "point" \| "centroid"))**: Determines the geometry type for features in the `aggs` layer. In the `aggs` layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query DSL used to filter documents for the search.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+- **`size` (Optional, number)**: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
+- **`track_total_hits` (Optional, boolean \| number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.
+
+## client.searchShards [_search_shards]
+Get the search shards.
+
+Get the indices and shards that a search request would be run against.
+This information can be useful for working out issues or planning optimizations with routing and shard preferences.
+When filtered aliases are used, the filter is returned as part of the `indices` section.
+
+If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.
+```ts
+client.searchShards({ ... })
+```
+### Arguments [_arguments_search_shards]
+
+#### Request (object) [_request_search_shards]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+
+## client.searchTemplate [_search_template]
+Run a search with a search template.
+```ts
+client.searchTemplate({ ... })
+```
+### Arguments [_arguments_search_template]
+
+#### Request (object) [_request_search_template]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`).
+- **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter.
+- **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required.
+- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+- **`profile` (Optional, boolean)**: If `true`, the query execution is profiled.
+- **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object.
+- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.
+
+## client.termsEnum [_terms_enum]
+Get terms in an index.
+
+Discover terms that match a partial string in an index.
+This API is designed for low-latency look-ups used in auto-complete scenarios.
+
+> info
+> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
+```ts
+client.termsEnum({ index, field })
+```
+### Arguments [_arguments_terms_enum]
+
+#### Request (object) [_request_terms_enum]
+
+- **`index` (string)**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`.
+- **`field` (string)**: The field from which terms should be enumerated.
+- **`size` (Optional, number)**: The number of matching terms to return.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
+- **`case_insensitive` (Optional, boolean)**: When `true`, the provided search string is matched against index terms without case sensitivity.
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter an index shard if the provided query rewrites to `match_none`.
+- **`string` (Optional, string)**: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
+- **`search_after` (Optional, string)**: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.
+
+## client.termvectors [_termvectors]
+Get term vector information.
+
+Get information and statistics about terms in the fields of a particular document.
+
+You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.
+You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.
+For example:
+
+```
+GET /my-index-000001/_termvectors/1?fields=message
+```
+
+Fields can be specified using wildcards, similar to the multi match query.
+
+Term vectors are real-time by default, not near real-time.
+This can be changed by setting the `realtime` parameter to `false`.
+
+You can request three types of values: _term information_, _term statistics_, and _field statistics_.
+By default, all term information and field statistics are returned for all fields, but term statistics are excluded.
+
+**Term information**
+
+* term frequency in the field (always returned)
+* term positions (`positions: true`)
+* start and end offsets (`offsets: true`)
+* term payloads (`payloads: true`), as base64 encoded bytes
+
+If the requested information wasn't stored in the index, it will be computed on the fly if possible.
+Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.
+
+> warn
+> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.
+
+**Behaviour**
+
+The term and field statistics are not accurate.
+Deleted documents are not taken into account.
+The information is only retrieved for the shard the requested document resides in.
+The term and field statistics are therefore only useful as relative measures, whereas the absolute numbers have no meaning in this context.
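+
+As a concrete illustration of an artificial-document request, here is a minimal sketch with this client; the index name, field, and document content are assumptions for the example only:
+
+```ts
+// Sketch: compute term vectors on the fly for a document that is not stored in the index.
+const response = await client.termvectors({
+  index: 'my-index-000001', // illustrative index name
+  doc: { message: 'elasticsearch is a search engine' }, // artificial document
+  fields: ['message'],
+  positions: true,
+  offsets: true,
+  term_statistics: true,
+  field_statistics: true
+})
+console.log(response.term_vectors)
+```
+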
+By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
+Use `routing` only to hit a particular shard.
+```ts
+client.termvectors({ index })
+```
+### Arguments [_arguments_termvectors]
+
+#### Request (object) [_request_termvectors]
+
+- **`index` (string)**: The name of the index that contains the document.
+- **`id` (Optional, string)**: A unique identifier for the document.
+- **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
+- **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
+- **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
+- **`fields` (Optional, string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+- **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field).
+- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
+- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+- **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
+- **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+
+## client.update [_update]
+Update a document.
+
+Update a document by running a script or passing a partial document.
+
+If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.
+
+The script can update, delete, or skip modifying the document.
+The API also supports passing a partial document, which is merged into the existing document.
+To fully replace an existing document, use the index API.
+This operation:
+
+* Gets the document (collocated with the shard) from the index.
+* Runs the specified script.
+* Indexes the result.
+
+The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.
+
+The `_source` field must be enabled to use this API.
+In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
+```ts
+client.update({ id, index })
+```
+### Arguments [_arguments_update]
+
+#### Request (object) [_request_update]
+
+- **`id` (string)**: A unique identifier for the document to be updated.
+- **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn't exist.
+- **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.
+- **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored.
+- **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of `doc` as the value of `upsert`. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document.
+- **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists.
+- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
+- **`upsert` (Optional, object)**: If the document does not already exist, the contents of `upsert` are inserted as a new document. If the document exists, the `script` is run.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message if there is a parsing error.
+- **`lang` (Optional, string)**: The script language.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+- **`_source_excludes` (Optional, string \| string[])**: The source fields you want to exclude.
+- **`_source_includes` (Optional, string \| string[])**: The source fields you want to retrieve.
+
+## client.updateByQuery [_update_by_query]
+Update documents.
+Updates documents that match the specified query.
+If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
+
+* `read`
+* `index` or `write`
+
+You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+
+When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.
+When the versions match, the document is updated and the version number is incremented.
+If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
+
+NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
+
+While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.
+A bulk update request is performed for each batch of matching documents.
+Any query or update failures cause the update by query request to fail and the failures are shown in the response.
+Any update requests that completed successfully still stick; they are not rolled back.
+
+**Throttling update requests**
+
+To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to turn off throttling.
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is 1000, so if `requests_per_second` is set to `500`:
+
+```
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+```
+
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Update by query supports sliced scroll to parallelize the update process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` chooses a reasonable number for most data streams and indices.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+
+Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
+
+* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
+* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
+* These sub-requests are individually addressable for things like cancellation and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
+* Canceling the request with slices will cancel each sub-request.
+* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
+* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time.
+
+If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
+
+* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
+* Update performance scales linearly across available resources with the number of slices.
+
+Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
+
+**Update the document source**
+
+Update by query supports scripts to update the document source.
+As with the update API, you can set `ctx.op` to change the operation that is performed.
+
+Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes.
+The update by query operation skips updating the document and increments the `noop` counter.
+
+Set `ctx.op = "delete"` if your script decides that the document should be deleted.
+The update by query operation deletes the document and increments the `deleted` counter.
+
+Update by query supports only `index`, `noop`, and `delete`.
+Setting `ctx.op` to anything else is an error.
+Setting any other field in `ctx` is an error.
+This API enables you to only modify the source of matching documents; you cannot move them.
+```ts
+client.updateByQuery({ index })
+```
+### Arguments [_arguments_update_by_query]
+
+#### Request (object) [_request_update_by_query]
+
+- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
+- **`max_docs` (Optional, number)**: The maximum number of documents to update.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. +- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`from` (Optional, number)**: Skips the specified number of documents. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. 
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax.
+- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
+- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
+- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
+- **`search_timeout` (Optional, string \| -1 \| 0)**: An explicit timeout for each search request. By default, there is no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into.
+- **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
+- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, boolean)**: Whether the document should increment its version number (internal versioning) on hit or not (as in reindex).
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
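+
+As a concrete illustration of the parameters above, here is a minimal sketch of a scripted update by query that counts version conflicts instead of aborting; the index name, query, and script are assumptions for the example only:
+
+```ts
+// Sketch: update every matching document with a script.
+const response = await client.updateByQuery({
+  index: 'my-index-000001',                 // illustrative index name
+  conflicts: 'proceed',                     // count version conflicts instead of failing
+  query: { term: { 'user.id': 'kimchy' } }, // illustrative query
+  script: {
+    source: 'ctx._source.count++',          // illustrative Painless script
+    lang: 'painless'
+  }
+})
+console.log(response.updated, response.version_conflicts)
+```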
+
+## client.updateByQueryRethrottle [_update_by_query_rethrottle]
+Throttle an update by query operation.
+
+Change the number of requests per second for a particular update by query operation.
+Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
+```ts
+client.updateByQueryRethrottle({ task_id })
+```
+### Arguments [_arguments_update_by_query_rethrottle]
+
+#### Request (object) [_request_update_by_query_rethrottle]
+
+- **`task_id` (string)**: The ID for the task.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.
+
+## client.asyncSearch.delete [_async_search.delete]
+Delete an async search.
+
+If the asynchronous search is still running, it is cancelled.
+Otherwise, the saved search results are deleted.
+If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.
+```ts
+client.asyncSearch.delete({ id })
+```
+
+### Arguments [_arguments_async_search.delete]
+
+#### Request (object) [_request_async_search.delete]
+- **`id` (string)**: A unique identifier for the async search.
+
+## client.asyncSearch.get [_async_search.get]
+Get async search results.
+
+Retrieve the results of a previously submitted asynchronous search request.
+If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
+```ts
+client.asyncSearch.get({ id })
+```
+
+### Arguments [_arguments_async_search.get]
+
+#### Request (object) [_request_async_search.get]
+- **`id` (string)**: A unique identifier for the async search.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search should be available in the cluster.
+When not specified, the `keep_alive` set with the corresponding submit async request will be used.
+Otherwise, it is possible to override the value and extend the validity of the request.
+When this period expires, the search, if still running, is cancelled.
+If the search is completed, its saved results are deleted.
+- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Specifies how long to wait for the search to complete, up until the provided timeout.
+Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
+By default no timeout is set, meaning that the currently available results will be returned without any additional wait.
+
+## client.asyncSearch.status [_async_search.status]
+Get the async search status.
+
+Get the status of a previously submitted async search request given its identifier, without retrieving search results.
+If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:
+
+* The user or API key that submitted the original async search request.
+* Users that have the `monitor` cluster privilege or greater privileges.
+```ts +client.asyncSearch.status({ id }) +``` + +### Arguments [_arguments_async_search.status] + +#### Request (object) [_request_async_search.status] +- **`id` (string)**: A unique identifier for the async search. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. + +## client.asyncSearch.submit [_async_search.submit] +Run an async search. + +When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. + +Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. + +By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. +The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. +```ts +client.asyncSearch.submit({ ... }) +``` + +### Arguments [_arguments_async_search.submit] + +#### Request (object) [_request_async_search.submit] +- **`index` (Optional, string \| string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in search results and results collected by aggregations. 
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to false. You can pass `_source: true` to return both source fields and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period.
+- **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+- **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string
+- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false)
+- **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query (AND or OR)
+- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) +- **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true +- **`routing` (Optional, string)**: A list of specific routing values +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Search operation type +- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. +- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: Specify suggest mode +- **`suggest_size` (Optional, number)**: How many suggestions to return in response +- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`_source_excludes` (Optional, string \| string[])**: A list of fields to exclude from the returned _source field +- **`_source_includes` (Optional, string \| string[])**: A list of fields to extract and return from the _source field +- **`q` (Optional, string)**: Query in the Lucene query string syntax + +## client.autoscaling.deleteAutoscalingPolicy [_autoscaling.delete_autoscaling_policy] +Delete an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +```ts +client.autoscaling.deleteAutoscalingPolicy({ name }) +``` + +### Arguments [_arguments_autoscaling.delete_autoscaling_policy] + +#### Request (object) [_request_autoscaling.delete_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.autoscaling.getAutoscalingCapacity [_autoscaling.get_autoscaling_capacity] +Get the autoscaling capacity. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +This API gets the current autoscaling capacity based on the configured autoscaling policy. +It will return information to size the cluster appropriately to the current workload. 
+ +The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. + +The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. + +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. +This information is provided for diagnosis only. +Do not use this information to make autoscaling decisions. +```ts +client.autoscaling.getAutoscalingCapacity({ ... }) +``` + +### Arguments [_arguments_autoscaling.get_autoscaling_capacity] + +#### Request (object) [_request_autoscaling.get_autoscaling_capacity] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.autoscaling.getAutoscalingPolicy [_autoscaling.get_autoscaling_policy] +Get an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +```ts +client.autoscaling.getAutoscalingPolicy({ name }) +``` + +### Arguments [_arguments_autoscaling.get_autoscaling_policy] + +#### Request (object) [_request_autoscaling.get_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.autoscaling.putAutoscalingPolicy [_autoscaling.put_autoscaling_policy] +Create or update an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. +```ts +client.autoscaling.putAutoscalingPolicy({ name }) +``` + +### Arguments [_arguments_autoscaling.put_autoscaling_policy] + +#### Request (object) [_request_autoscaling.put_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`policy` (Optional, { roles, deciders })** +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.cat.aliases [_cat.aliases] +Get aliases. + +Get the cluster's index aliases, including filter and routing information. +This API does not return data stream aliases. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. +```ts +client.cat.aliases({ ... }) +``` + +### Arguments [_arguments_cat.aliases] + +#### Request (object) [_request_cat.aliases] +- **`name` (Optional, string \| string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. 
+- **`h` (Optional, Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index") \| Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. + +## client.cat.allocation [_cat.allocation] +Get shard allocation information. + +Get a snapshot of the number of shards allocated to each data node and their disk space. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. +```ts +client.cat.allocation({ ... }) +``` + +### Arguments [_arguments_cat.allocation] + +#### Request (object) [_request_cat.allocation] +- **`node_id` (Optional, string \| string[])**: A list of node identifiers or names used to limit the returned information. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`h` (Optional, Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role") \| Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.componentTemplates [_cat.component_templates] +Get component templates. + +Get information about component templates in a cluster. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. 
For application consumption, use the get component template API.
+```ts
+client.cat.componentTemplates({ ... })
+```
+
+### Arguments [_arguments_cat.component_templates]
+
+#### Request (object) [_request_cat.component_templates]
+- **`name` (Optional, string)**: The name of the component template.
+It accepts wildcard expressions.
+If it is omitted, all component templates are returned.
+- **`h` (Optional, Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in") \| Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+
+## client.cat.count [_cat.count]
+Get a document count.
+
+Get quick access to a document count for a data stream, an index, or an entire cluster.
+The document count only includes live documents, not deleted documents that have not yet been removed by the merge process.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the count API.
+```ts
+client.cat.count({ ... })
+```
+
+### Arguments [_arguments_cat.count]
+
+#### Request (object) [_request_cat.count]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
+It supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`h` (Optional, Enum("epoch" \| "timestamp" \| "count") \| Enum("epoch" \| "timestamp" \| "count")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+
+## client.cat.fielddata [_cat.fielddata]
+Get field data cache information.
+
+Get the amount of heap memory currently used by the field data cache on every data node in the cluster.
+
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the nodes stats API.
+```ts
+client.cat.fielddata({ ... })
+```
+
+### Arguments [_arguments_cat.fielddata]
+
+#### Request (object) [_request_cat.fielddata]
+- **`fields` (Optional, string \| string[])**: List of fields used to limit returned information.
+To retrieve all fields, omit this parameter.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size") \| Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.health [_cat.health] +Get the cluster health status. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the cluster health API. +This API is often used to check malfunctioning clusters. +To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: +`HH:MM:SS`, which is human-readable but includes no date information; +`Unix epoch time`, which is machine-sortable and includes date information. +The latter format is useful for cluster recoveries that take multiple days. +You can use the cat health API to verify cluster health across multiple nodes. +You also can use the API to track the recovery of a large cluster over a longer period of time. +```ts +client.cat.health({ ... }) +``` + +### Arguments [_arguments_cat.health] + +#### Request (object) [_request_cat.health] +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. +- **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.help [_cat.help] +Get CAT help. + +Get help for the CAT APIs. +```ts +client.cat.help() +``` + + +## client.cat.indices [_cat.indices] +Get index information. + +Get high-level information about indices in a cluster, including backing indices for data streams. + +Use this request to get the following information for each index in a cluster: +- shard count +- document count +- deleted document count +- primary store size +- total store size of all shards, including shard replicas + +These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. +```ts +client.cat.indices({ ... }) +``` + +### Arguments [_arguments_cat.indices] + +#### Request (object) [_request_cat.indices] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. 
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match.
+- **`health` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: The health status used to limit returned indices. By default, the response includes indices of any health status.
+- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory.
+- **`pri` (Optional, boolean)**: If `true`, the response only includes information from primary shards.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+
+## client.cat.master [_cat.master]
+Get master node information.
+
+Get information about the master node, including the ID, bound IP address, and name.
+
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+```ts
+client.cat.master({ ... })
+```
+
+### Arguments [_arguments_cat.master]
+
+#### Request (object) [_request_cat.master]
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.mlDataFrameAnalytics [_cat.ml_data_frame_analytics]
+Get data frame analytics jobs.
+
+Get configuration and usage information about data frame analytics jobs.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+console or command line. They are not intended for use by applications. For
+application consumption, use the get data frame analytics jobs statistics API.
+```ts
+client.cat.mlDataFrameAnalytics({ ... })
+```
+
+### Arguments [_arguments_cat.ml_data_frame_analytics]
+
+#### Request (object) [_request_cat.ml_data_frame_analytics]
+- **`id` (Optional, string)**: The ID of the data frame analytics to fetch
+- **`allow_no_match` (Optional, boolean)**: Whether to ignore the request if a wildcard expression matches no configurations
(this includes the `_all` string or when no configurations have been specified).
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit in which to display byte values
+- **`h` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names to display.
+- **`s` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names or column aliases used to sort the
+response.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values.
+
+## client.cat.mlDatafeeds [_cat.ml_datafeeds]
+Get datafeeds.
+
+Get configuration and usage information about datafeeds.
+This API returns a maximum of 10,000 datafeeds.
+If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`
+cluster privileges to use this API.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+console or command line. They are not intended for use by applications. For
+application consumption, use the get datafeed statistics API.
+```ts
+client.cat.mlDatafeeds({ ... })
+```
+
+### Arguments [_arguments_cat.ml_datafeeds]
+
+#### Request (object) [_request_cat.ml_datafeeds]
+- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+* Contains wildcard expressions and there are no datafeeds that match.
+* Contains the `_all` string or no identifiers and there are no matches.
+* Contains wildcard expressions and there are only partial matches.
+
+If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when
+there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only
+partial matches.
+- **`h` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names to display.
+- **`s` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names or column aliases used to sort the response.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. + +## client.cat.mlJobs [_cat.ml_jobs] +Get anomaly detection jobs. + +Get configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. +```ts +client.cat.mlJobs({ ... }) +``` + +### Arguments [_arguments_cat.ml_jobs] + +#### Request (object) [_request_cat.ml_jobs] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`h` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| 
"data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names to display. +- **`s` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| 
"model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. + +## client.cat.mlTrainedModels [_cat.ml_trained_models] +Get trained models. + +Get configuration and usage information about inference trained models. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get trained models statistics API. +```ts +client.cat.mlTrainedModels({ ... }) +``` + +### Arguments [_arguments_cat.ml_trained_models] + +#### Request (object) [_request_cat.ml_trained_models] +- **`model_id` (Optional, string)**: A unique identifier for the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the API returns a 404 status code when there are no matches or only partial matches. +- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. +- **`h` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names to display. +- **`s` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names or aliases used to sort the response. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: The maximum number of transforms to display. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. + +## client.cat.nodeattrs [_cat.nodeattrs] +Get node attribute information. + +Get information about custom node attributes. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. 
They are not intended for use by applications. For application consumption, use the nodes info API.
+```ts
+client.cat.nodeattrs({ ... })
+```
+
+### Arguments [_arguments_cat.nodeattrs]
+
+#### Request (object) [_request_cat.nodeattrs]
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.nodes [_cat.nodes]
+Get node information.
+
+Get information about the nodes in a cluster.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+```ts
+client.cat.nodes({ ... })
+```
+
+### Arguments [_arguments_cat.nodes]
+
+#### Request (object) [_request_cat.nodes]
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`full_id` (Optional, boolean \| string)**: If `true`, return the full node ID. If `false`, return the shortened node ID.
+- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory.
+- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| 
"search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. + +## client.cat.pendingTasks [_cat.pending_tasks] +Get pending task information. + +Get information about cluster-level changes that have not yet taken effect. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. +```ts +client.cat.pendingTasks({ ... }) +``` + +### Arguments [_arguments_cat.pending_tasks] + +#### Request (object) [_request_cat.pending_tasks] +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. + +## client.cat.plugins [_cat.plugins] +Get plugin information. + +Get a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +```ts +client.cat.plugins({ ... }) +``` + +### Arguments [_arguments_cat.plugins] + +#### Request (object) [_request_cat.plugins] +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. 
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.recovery [_cat.recovery]
+Get shard recovery information.
+
+Get information about ongoing and completed shard recoveries.
+Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.
+For data streams, the API returns information about the stream’s backing indices.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
+```ts
+client.cat.recovery({ ... })
+```
+
+### Arguments [_arguments_cat.recovery]
+
+#### Request (object) [_request_cat.recovery]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries.
+- **`h` (Optional, Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis") \| Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis")[])**: A list of column names to display.
+It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values.
+
+## client.cat.repositories [_cat.repositories]
+Get snapshot repository information.
+
+Get a list of snapshot repositories for a cluster.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
+```ts
+client.cat.repositories({ ... })
+```
+
+### Arguments [_arguments_cat.repositories]
+
+#### Request (object) [_request_cat.repositories]
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.segments [_cat.segments]
+Get segment information.
+
+Get low-level information about the Lucene segments in index shards.
+For data streams, the API returns information about the backing indices.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
+```ts
+client.cat.segments({ ... })
+```
+
+### Arguments [_arguments_cat.segments]
+
+#### Request (object) [_request_cat.segments]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of column names to display.
+It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.shards [_cat.shards]
+Get shard information.
+
+Get information about the shards in a cluster.
+For data streams, the API returns information about the backing indices.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
+```ts
+client.cat.shards({ ... })
+```
+
+### Arguments [_arguments_cat.shards]
+
+#### Request (object) [_request_cat.shards]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
+- **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. + +## client.cat.snapshots [_cat.snapshots] +Get snapshot information. + +Get information about the snapshots stored in one or more repositories. +A snapshot is a backup of an index or running Elasticsearch cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. +```ts +client.cat.snapshots({ ... }) +``` + +### Arguments [_arguments_cat.snapshots] + +#### Request (object) [_request_cat.snapshots] +- **`repository` (Optional, string \| string[])**: A list of snapshot repositories used to limit the request. +Accepts wildcard expressions. +`_all` returns all repositories. +If any repository fails during the request, Elasticsearch returns an error. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. +- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. + +## client.cat.tasks [_cat.tasks] +Get task information. + +Get information about tasks currently running in the cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. +```ts +client.cat.tasks({ ... }) +``` + +### Arguments [_arguments_cat.tasks] + +#### Request (object) [_request_cat.tasks] +- **`actions` (Optional, string[])**: The task action names, which are used to limit the response. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +- **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response. +- **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response. +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
+
+## client.cat.templates [_cat.templates]
+Get index template information.
+
+Get information about the index templates in a cluster.
+You can use index templates to apply index settings and field mappings to new indices at creation.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
+```ts
+client.cat.templates({ ... })
+```
+
+### Arguments [_arguments_cat.templates]
+
+#### Request (object) [_request_cat.templates]
+- **`name` (Optional, string)**: The name of the template to return.
+Accepts wildcard expressions. If omitted, all templates are returned.
+- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.threadPool [_cat.thread_pool]
+Get thread pool statistics.
+
+Get thread pool statistics for each node in a cluster.
+Returned information includes all built-in thread pools and custom thread pools.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+```ts
+client.cat.threadPool({ ... })
+```
+
+### Arguments [_arguments_cat.thread_pool]
+
+#### Request (object) [_request_cat.thread_pool]
+- **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request.
+Accepts wildcard expressions.
+- **`h` (Optional, Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type") \| Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type")[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node.
In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. + +## client.cat.transforms [_cat.transforms] +Get transform information. + +Get configuration and usage information about transforms. + +CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get transform statistics API. +```ts +client.cat.transforms({ ... }) +``` + +### Arguments [_arguments_cat.transforms] + +#### Request (object) [_request_cat.transforms] +- **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. +If you do not specify one of these options, the API returns information for all transforms. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`h` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names to display. 
+- **`s` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. +- **`size` (Optional, number)**: The maximum number of transforms to obtain. + +## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] +Delete auto-follow patterns. + +Delete a collection of cross-cluster replication auto-follow patterns. +```ts +client.ccr.deleteAutoFollowPattern({ name }) +``` + +### Arguments [_arguments_ccr.delete_auto_follow_pattern] + +#### Request (object) [_request_ccr.delete_auto_follow_pattern] +- **`name` (string)**: The auto-follow pattern collection to delete. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.follow [_ccr.follow] +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. +```ts +client.ccr.follow({ index, leader_index, remote_cluster }) +``` + +### Arguments [_arguments_ccr.follow] + +#### Request (object) [_request_ccr.follow] +- **`index` (string)**: The name of the follower index. +- **`leader_index` (string)**: The name of the index in the leader cluster to follow. +- **`remote_cluster` (string)**: The remote cluster containing the leader index. +- **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. +- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. 
+- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
+retrying.
+- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
+deferred until the number of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
+be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
+When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
+Then the follower will immediately attempt to read from the leader again.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: Specifies the number of shards that must be active before the request responds. By default, the request does not wait for any
+shards to be active.
+A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
+remote Lucene segment files to the follower index.
+
+## client.ccr.followInfo [_ccr.follow_info]
+Get follower information.
+
+Get information about all cross-cluster replication follower indices.
+For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
+```ts
+client.ccr.followInfo({ index })
+```
+
+### Arguments [_arguments_ccr.follow_info]
+
+#### Request (object) [_request_ccr.follow_info]
+- **`index` (string \| string[])**: A comma-delimited list of follower index patterns.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.ccr.followStats [_ccr.follow_stats]
+Get follower stats.
+
+Get cross-cluster replication follower stats.
+The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.
+```ts
+client.ccr.followStats({ index })
+```
+
+### Arguments [_arguments_ccr.follow_stats]
+
+#### Request (object) [_request_ccr.follow_stats]
+- **`index` (string \| string[])**: A comma-delimited list of index patterns.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ccr.forgetFollower [_ccr.forget_follower]
+Forget a follower.
+Remove the cross-cluster replication follower retention leases from the leader.
+
+A following index takes out retention leases on its leader index.
+These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication.
+When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed.
+However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable.
+While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index.
+This API exists to enable manually removing the leases when the unfollow API is unable to do so.
+
+NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader.
+The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.
+```ts
+client.ccr.forgetFollower({ index })
+```
+
+### Arguments [_arguments_ccr.forget_follower]
+
+#### Request (object) [_request_ccr.forget_follower]
+- **`index` (string)**: The name of the leader index for which the specified follower retention leases should be removed.
+- **`follower_cluster` (Optional, string)**
+- **`follower_index` (Optional, string)**
+- **`follower_index_uuid` (Optional, string)**
+- **`leader_remote_cluster` (Optional, string)**
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ccr.getAutoFollowPattern [_ccr.get_auto_follow_pattern]
+Get auto-follow patterns.
+
+Get cross-cluster replication auto-follow patterns.
+```ts
+client.ccr.getAutoFollowPattern({ ...
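+  // Hypothetical name, for illustration only: fetch one collection by name,
+  // or omit `name` to retrieve all auto-follow pattern collections.
+  // name: 'my-auto-follow-pattern'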
}) +``` + +### Arguments [_arguments_ccr.get_auto_follow_pattern] + +#### Request (object) [_request_ccr.get_auto_follow_pattern] +- **`name` (Optional, string)**: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.pauseAutoFollowPattern [_ccr.pause_auto_follow_pattern] +Pause an auto-follow pattern. + +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. +```ts +client.ccr.pauseAutoFollowPattern({ name }) +``` + +### Arguments [_arguments_ccr.pause_auto_follow_pattern] + +#### Request (object) [_request_ccr.pause_auto_follow_pattern] +- **`name` (string)**: The name of the auto-follow pattern to pause. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.pauseFollow [_ccr.pause_follow] +Pause a follower. + +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. +```ts +client.ccr.pauseFollow({ index }) +``` + +### Arguments [_arguments_ccr.pause_follow] + +#### Request (object) [_request_ccr.pause_follow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.putAutoFollowPattern [_ccr.put_auto_follow_pattern] +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. + +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. 
+```ts
+client.ccr.putAutoFollowPattern({ name, remote_cluster })
+```
+
+### Arguments [_arguments_ccr.put_auto_follow_pattern]
+
+#### Request (object) [_request_ccr.put_auto_follow_pattern]
+- **`name` (string)**: The name of the collection of auto-follow patterns.
+- **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against.
+- **`follow_index_pattern` (Optional, string)**: The name of the follower index. The template `leader_index` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `leader_index`; CCR does not support changes to the names of a follower data stream’s backing indices.
+- **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
+- **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
+- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
+- **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
+- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
+- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.ccr.resumeAutoFollowPattern [_ccr.resume_auto_follow_pattern]
+Resume an auto-follow pattern.
+
+Resume a cross-cluster replication auto-follow pattern that was paused.
+The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.
+Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.
+```ts
+client.ccr.resumeAutoFollowPattern({ name })
+```
+
+### Arguments [_arguments_ccr.resume_auto_follow_pattern]
+
+#### Request (object) [_request_ccr.resume_auto_follow_pattern]
+- **`name` (string)**: The name of the auto-follow pattern to resume.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.ccr.resumeFollow [_ccr.resume_follow]
+Resume a follower.
+Resume a cross-cluster replication follower index that was paused.
+The follower index could have been paused with the pause follower API.
+Alternatively, it could have been paused because of replication failures during the following task that could not be retried.
+When this API returns, the follower index will resume fetching operations from the leader index.
+```ts
+client.ccr.resumeFollow({ index })
+```
+
+### Arguments [_arguments_ccr.resume_follow]
+
+#### Request (object) [_request_ccr.resume_follow]
+- **`index` (string)**: The name of the follower index to resume following.
+- **`max_outstanding_read_requests` (Optional, number)**
+- **`max_outstanding_write_requests` (Optional, number)**
+- **`max_read_request_operation_count` (Optional, number)**
+- **`max_read_request_size` (Optional, string)**
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**
+- **`max_write_buffer_count` (Optional, number)**
+- **`max_write_buffer_size` (Optional, string)**
+- **`max_write_request_operation_count` (Optional, number)**
+- **`max_write_request_size` (Optional, string)**
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.ccr.stats [_ccr.stats]
+Get cross-cluster replication stats.
+
+This API returns stats about auto-following and the same shard-level stats as the get follower stats API.
+```ts
+client.ccr.stats({ ... })
+```
+
+### Arguments [_arguments_ccr.stats]
+
+#### Request (object) [_request_ccr.stats]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ccr.unfollow [_ccr.unfollow]
+Unfollow an index.
+
+Convert a cross-cluster replication follower index to a regular index.
+The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
+The follower index must be paused and closed before you call the unfollow API.
+
+> info
+> Currently cross-cluster replication does not support converting an existing regular index to a follower index.
Converting a follower index to a regular index is an irreversible operation.
+```ts
+client.ccr.unfollow({ index })
+```
+
+### Arguments [_arguments_ccr.unfollow]
+
+#### Request (object) [_request_ccr.unfollow]
+- **`index` (string)**: The name of the follower index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.cluster.allocationExplain [_cluster.allocation_explain]
+Explain the shard allocations.
+Get explanations for shard allocations in the cluster.
+For unassigned shards, it provides an explanation for why the shard is unassigned.
+For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node.
+This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
+```ts
+client.cluster.allocationExplain({ ... })
+```
+
+### Arguments [_arguments_cluster.allocation_explain]
+
+#### Request (object) [_request_cluster.allocation_explain]
+- **`current_node` (Optional, string)**: The node ID or the name of a node; if specified, the API explains a shard only if it is currently located on that node.
+- **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for.
+- **`primary` (Optional, boolean)**: If true, returns the explanation for the primary shard for the given shard ID.
+- **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for.
+- **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes.
+- **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in the explanation.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cluster.deleteComponentTemplate [_cluster.delete_component_template]
+Delete component templates.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+```ts
+client.cluster.deleteComponentTemplate({ name })
+```
+
+### Arguments [_arguments_cluster.delete_component_template]
+
+#### Request (object) [_request_cluster.delete_component_template]
+- **`name` (string \| string[])**: List or wildcard expression of component template names used to limit the request.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.deleteVotingConfigExclusions [_cluster.delete_voting_config_exclusions]
+Clear cluster voting config exclusions.
+Remove master-eligible nodes from the voting configuration exclusion list.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions)
+
+```ts
+client.cluster.deleteVotingConfigExclusions({ ...
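+  // Hypothetical usage, for illustration only: clear the exclusions list
+  // immediately, without waiting for the excluded nodes to leave the cluster.
+  // wait_for_removal: false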
}) +``` + +### Arguments [_arguments_cluster.delete_voting_config_exclusions] + +#### Request (object) [_request_cluster.delete_voting_config_exclusions] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the +cluster before clearing the voting configuration exclusions list. +Defaults to true, meaning that all excluded nodes must be removed from +the cluster before this API takes any action. If set to false then the +voting configuration exclusions list is cleared even if some excluded +nodes are still in the cluster. + +## client.cluster.existsComponentTemplate [_cluster.exists_component_template] +Check component templates. +Returns information about whether a particular component template exists. +```ts +client.cluster.existsComponentTemplate({ name }) +``` + +### Arguments [_arguments_cluster.exists_component_template] + +#### Request (object) [_request_cluster.exists_component_template] +- **`name` (string \| string[])**: List of component template names used to limit the request. +Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. +Defaults to false, which means information is retrieved from the master node. + +## client.cluster.getComponentTemplate [_cluster.get_component_template] +Get component templates. +Get information about component templates. +```ts +client.cluster.getComponentTemplate({ ... }) +``` + +### Arguments [_arguments_cluster.get_component_template] + +#### Request (object) [_request_cluster.get_component_template] +- **`name` (Optional, string)**: List of component template names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`settings_filter` (Optional, string \| string[])**: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys +- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.cluster.getSettings [_cluster.get_settings] +Get cluster-wide settings. +By default, it returns only settings that have been explicitly defined. +```ts +client.cluster.getSettings({ ... }) +``` + +### Arguments [_arguments_cluster.get_settings] + +#### Request (object) [_request_cluster.get_settings] +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`include_defaults` (Optional, boolean)**: If `true`, also returns default values for all other cluster settings, reflecting the values +in the `elasticsearch.yml` file of one of the nodes in the cluster. 
If the nodes in your
+cluster do not all have the same values in their `elasticsearch.yml` config files then the
+values returned by this API may vary from invocation to invocation and may not reflect the
+values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to
+fetch the settings for each individual node in your cluster.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.health [_cluster.health]
+Get the cluster health status.
+
+You can also use the API to get the health status of only specified data streams and indices.
+For data streams, the API retrieves the health status of the stream’s backing indices.
+
+The cluster health status is: green, yellow or red.
+On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
+The index level status is controlled by the worst shard status.
+
+One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
+The cluster status is controlled by the worst index status.
+```ts
+client.cluster.health({ ... })
+```
+
+### Arguments [_arguments_cluster.health]
+
+#### Request (object) [_request_cluster.health]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
+- **`wait_for_events` (Optional, Enum("immediate" \| "urgent" \| "high" \| "normal" \| "low" \| "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+- **`wait_for_nodes` (Optional, string \| number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
+- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, that is, green > yellow > red.
By default, will not wait for any status.
+
+## client.cluster.info [_cluster.info]
+Get cluster info.
+Returns basic information about the cluster.
+```ts
+client.cluster.info({ target })
+```
+
+### Arguments [_arguments_cluster.info]
+
+#### Request (object) [_request_cluster.info]
+- **`target` (Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script") \| Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest.
+
+## client.cluster.pendingTasks [_cluster.pending_tasks]
+Get the pending cluster tasks.
+Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+
+NOTE: This API returns a list of any pending updates to the cluster state.
+These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.
+However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API.
+```ts
+client.cluster.pendingTasks({ ... })
+```
+
+### Arguments [_arguments_cluster.pending_tasks]
+
+#### Request (object) [_request_cluster.pending_tasks]
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.postVotingConfigExclusions [_cluster.post_voting_config_exclusions]
+Update voting configuration exclusions.
+Update the cluster voting config exclusions by node IDs or node names.
+By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks.
+If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually.
+The API adds an entry for each specified node to the cluster’s voting configuration exclusions list.
+It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.
+
+Clusters should have no voting configuration exclusions in normal operation.
+Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`.
+This API waits for the nodes to be fully removed from the cluster before it returns.
+If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.
+
+A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`.
+If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration.
+In that case, you may safely retry the call.
+
+NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period.
+They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-cluster-post-voting-config-exclusions)
+
+```ts
+client.cluster.postVotingConfigExclusions({ ... })
+```
+
+### Arguments [_arguments_cluster.post_voting_config_exclusions]
+
+#### Request (object) [_request_cluster.post_voting_config_exclusions]
+- **`node_names` (Optional, string \| string[])**: A list of the names of the nodes to exclude from the
+voting configuration. If specified, you may not also specify node_ids.
+- **`node_ids` (Optional, string \| string[])**: A list of the persistent IDs of the nodes to exclude
+from the voting configuration. If specified, you may not also specify node_names.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: When adding a voting configuration exclusion, the API waits for the
+specified nodes to be excluded from the voting configuration before
+returning. If the timeout expires before the appropriate condition
+is satisfied, the request fails and returns an error.
+
+## client.cluster.putComponentTemplate [_cluster.put_component_template]
+Create or update a component template.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+
+An index template can be composed of multiple component templates.
+To use a component template, specify it in an index template’s `composed_of` list.
+Component templates are only applied to new data streams and indices as part of a matching index template.
+
+Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.
+
+Component templates are only used during index creation.
+For data streams, this includes data stream creation and the creation of a stream’s backing indices.
+Changes to component templates do not affect existing indices, including a stream’s backing indices.
+
+You can use C-style `/* *\/` block comments in component templates.
+You can include comments anywhere in the request body except before the opening curly bracket.
+
+**Applying component templates**
+
+You cannot directly apply a component template to a data stream or index.
+To be applied, a component template must be included in an index template's `composed_of` list.
+```ts
+client.cluster.putComponentTemplate({ name, template })
+```
+
+### Arguments [_arguments_cluster.put_component_template]
+
+#### Request (object) [_request_cluster.put_component_template]
+- **`name` (string)**: Name of the component template to create.
+Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`.
+Elastic Agent uses these templates to configure backing indices for its data streams.
+If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
+If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
+- **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied, which includes mappings, settings, or aliases configuration.
+- **`version` (Optional, number)**: Version number used to manage component templates externally.
+This number isn't automatically generated or incremented by Elasticsearch.
+To unset a version, replace the template without specifying a version.
+- **`_meta` (Optional, Record)**: Optional user metadata about the component template.
+It may have any contents. This map is not automatically generated by Elasticsearch.
+This information is stored in the cluster state, so keeping it short is preferable.
+To unset `_meta`, replace the template without specifying this information.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates.
+- **`cause` (Optional, string)**: User-defined reason for creating the component template.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.putSettings [_cluster.put_settings]
+Update the cluster settings.
+
+Configure and update dynamic settings on a running cluster.
+You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.
+
+Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
+You can also reset transient or persistent settings by assigning them a null value.
+
+If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.
+For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting.
+However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting.
+
+TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster.
+If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings.
+Only use `elasticsearch.yml` for static cluster settings and node settings.
+The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.
+
+WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead.
+If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
+```ts
+client.cluster.putSettings({ ...
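+  // A minimal sketch with an assumed setting value: persist a dynamic setting,
+  // and reset a transient copy of it by assigning null (see the precedence rules above).
+  // persistent: { 'cluster.routing.allocation.enable': 'all' },
+  // transient: { 'cluster.routing.allocation.enable': null }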
}) +``` + +### Arguments [_arguments_cluster.put_settings] + +#### Request (object) [_request_cluster.put_settings] +- **`persistent` (Optional, Record)**: The settings that persist after the cluster restarts. +- **`transient` (Optional, Record)**: The settings that do not persist after the cluster restarts. +- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout + +## client.cluster.remoteInfo [_cluster.remote_info] +Get remote cluster information. + +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. + +> info +> This API returns information that reflects current state on the local cluster. +> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the `/_resolve/cluster` endpoint. +```ts +client.cluster.remoteInfo() +``` + + +## client.cluster.reroute [_cluster.reroute] +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. +```ts +client.cluster.reroute({ ... }) +``` + +### Arguments [_arguments_cluster.reroute] + +#### Request (object) [_request_cluster.reroute] +- **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. +- **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. +It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. 
+- **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specified metrics. +- **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.cluster.state [_cluster.state] +Get the cluster state. +Get comprehensive information about the state of the cluster. + +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. + +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. +```ts +client.cluster.state({ ... }) +``` + +### Arguments [_arguments_cluster.state] + +#### Request (object) [_request_cluster.state] +- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false)
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version
+- **`wait_for_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for wait_for_metadata_version before timing out
+
+## client.cluster.stats [_cluster.stats]
+Get cluster statistics.
+Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, OS, JVM versions, memory usage, CPU, and installed plugins).
+```ts
+client.cluster.stats({ ... })
+```
+
+### Arguments [_arguments_cluster.stats]
+
+#### Request (object) [_request_cluster.stats]
+- **`node_id` (Optional, string \| string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster.
+- **`include_remotes` (Optional, boolean)**: Include remote cluster data in the response
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its stats.
+However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.
+
+## client.connector.checkIn [_connector.check_in]
+Check in a connector.
+
+Update the `last_seen` field in the connector and set it to the current timestamp.
+```ts
+client.connector.checkIn({ connector_id })
+```
+
+### Arguments [_arguments_connector.check_in]
+
+#### Request (object) [_request_connector.check_in]
+- **`connector_id` (string)**: The unique identifier of the connector to be checked in
+
+## client.connector.delete [_connector.delete]
+Delete a connector.
+
+Removes a connector and associated sync jobs.
+This is a destructive action that is not recoverable.
+NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector.
+These need to be removed manually.
+```ts
+client.connector.delete({ connector_id })
+```
+
+### Arguments [_arguments_connector.delete]
+
+#### Request (object) [_request_connector.delete]
+- **`connector_id` (string)**: The unique identifier of the connector to be deleted
+- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should also be removed. Defaults to false.
+
+## client.connector.get [_connector.get]
+Get a connector.
+
+Get the details about a connector.
+```ts
+client.connector.get({ connector_id })
+```
+
+### Arguments [_arguments_connector.get]
+
+#### Request (object) [_request_connector.get]
+- **`connector_id` (string)**: The unique identifier of the connector
+
+## client.connector.list [_connector.list]
+Get all connectors.
+
+Get information about all connectors.
+```ts
+client.connector.list({ ...
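+  // Hypothetical filter values, for illustration only; every parameter is
+  // optional, so an empty object returns all connectors.
+  // index_name: 'search-my-index',
+  // connector_name: 'my-connector'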
}) +``` + +### Arguments [_arguments_connector.list] + +#### Request (object) [_request_connector.list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`index_name` (Optional, string \| string[])**: A list of connector index names to fetch connector documents for +- **`connector_name` (Optional, string \| string[])**: A list of connector names to fetch connector documents for +- **`service_type` (Optional, string \| string[])**: A list of connector service types to fetch connector documents for +- **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name + +## client.connector.post [_connector.post] +Create a connector. + +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. +```ts +client.connector.post({ ... }) +``` + +### Arguments [_arguments_connector.post] + +#### Request (object) [_request_connector.post] +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** + +## client.connector.put [_connector.put] +Create or update a connector. +```ts +client.connector.put({ ... }) +``` + +### Arguments [_arguments_connector.put] + +#### Request (object) [_request_connector.put] +- **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** + +## client.connector.syncJobCancel [_connector.sync_job_cancel] +Cancel a connector sync job. + +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. +```ts +client.connector.syncJobCancel({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_cancel] + +#### Request (object) [_request_connector.sync_job_cancel] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + +## client.connector.syncJobCheckIn [_connector.sync_job_check_in] +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. +```ts +client.connector.syncJobCheckIn({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_check_in] + +#### Request (object) [_request_connector.sync_job_check_in] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. + +## client.connector.syncJobClaim [_connector.sync_job_claim] +Claim a connector sync job. 
+This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. +```ts +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) +``` + +### Arguments [_arguments_connector.sync_job_claim] + +#### Request (object) [_request_connector.sync_job_claim] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`worker_hostname` (string)**: The host name of the current system that will run the job. +- **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. + +## client.connector.syncJobDelete [_connector.sync_job_delete] +Delete a connector sync job. + +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. +```ts +client.connector.syncJobDelete({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_delete] + +#### Request (object) [_request_connector.sync_job_delete] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted + +## client.connector.syncJobError [_connector.sync_job_error] +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. +```ts +client.connector.syncJobError({ connector_sync_job_id, error }) +``` + +### Arguments [_arguments_connector.sync_job_error] + +#### Request (object) [_request_connector.sync_job_error] +- **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. +- **`error` (string)**: The error for the connector sync job error field. + +## client.connector.syncJobGet [_connector.sync_job_get] +Get a connector sync job. +```ts +client.connector.syncJobGet({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_get] + +#### Request (object) [_request_connector.sync_job_get] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + +## client.connector.syncJobList [_connector.sync_job_list] +Get all connector sync jobs. + +Get information about all stored connector sync jobs listed by their creation date in ascending order. +```ts +client.connector.syncJobList({ ... 
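+  // Hypothetical values, for illustration only: fetch completed sync jobs for
+  // a single connector; every parameter is optional.
+  // connector_id: 'my-connector-id',
+  // status: 'completed'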
}) +``` + +### Arguments [_arguments_connector.sync_job_list] + +#### Request (object) [_request_connector.sync_job_list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`status` (Optional, Enum("canceling" \| "canceled" \| "completed" \| "error" \| "in_progress" \| "pending" \| "suspended"))**: A sync job status to fetch connector sync jobs for +- **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control") \| Enum("full" \| "incremental" \| "access_control")[])**: A list of job types to fetch the sync jobs for + +## client.connector.syncJobPost [_connector.sync_job_post] +Create a connector sync job. + +Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. +```ts +client.connector.syncJobPost({ id }) +``` + +### Arguments [_arguments_connector.sync_job_post] + +#### Request (object) [_request_connector.sync_job_post] +- **`id` (string)**: The id of the associated connector +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control"))** +- **`trigger_method` (Optional, Enum("on_demand" \| "scheduled"))** + +## client.connector.syncJobUpdateStats [_connector.sync_job_update_stats] +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. +```ts +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) +``` + +### Arguments [_arguments_connector.sync_job_update_stats] + +#### Request (object) [_request_connector.sync_job_update_stats] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`deleted_document_count` (number)**: The number of documents the sync job deleted. +- **`indexed_document_count` (number)**: The number of documents the sync job indexed. +- **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. +- **`last_seen` (Optional, string \| -1 \| 0)**: The timestamp to use in the `last_seen` property for the connector sync job. +- **`metadata` (Optional, Record)**: The connector-specific metadata. +- **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. + +## client.connector.updateActiveFiltering [_connector.update_active_filtering] +Activate the connector draft filter. + +Activates the valid draft filtering for a connector. +```ts +client.connector.updateActiveFiltering({ connector_id }) +``` + +### Arguments [_arguments_connector.update_active_filtering] + +#### Request (object) [_request_connector.update_active_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated + +## client.connector.updateApiKeyId [_connector.update_api_key_id] +Update the connector API key ID. + +Update the `api_key_id` and `api_key_secret_id` fields of a connector. 
+You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored.
+The connector secret ID is required only for Elastic managed (native) connectors.
+Self-managed connectors (connector clients) do not use this field.
+```ts
+client.connector.updateApiKeyId({ connector_id })
+```
+
+### Arguments [_arguments_connector.update_api_key_id]
+
+#### Request (object) [_request_connector.update_api_key_id]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`api_key_id` (Optional, string)**
+- **`api_key_secret_id` (Optional, string)**
+
+## client.connector.updateConfiguration [_connector.update_configuration]
+Update the connector configuration.
+
+Update the configuration field in the connector document.
+```ts
+client.connector.updateConfiguration({ connector_id })
+```
+
+### Arguments [_arguments_connector.update_configuration]
+
+#### Request (object) [_request_connector.update_configuration]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`configuration` (Optional, Record)**
+- **`values` (Optional, Record)**
+
+## client.connector.updateError [_connector.update_error]
+Update the connector error field.
+
+Set the `error` field for the connector.
+If the error provided in the request body is non-null, the connector’s status is updated to `error`.
+Otherwise, if the error is reset to null, the connector status is updated to `connected`.
+```ts
+client.connector.updateError({ connector_id, error })
+```
+
+### Arguments [_arguments_connector.update_error]
+
+#### Request (object) [_request_connector.update_error]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`error` (T \| null)**
+
+## client.connector.updateFeatures [_connector.update_features]
+Update the connector features.
+Update the connector features in the connector document.
+This API can be used to control the following aspects of a connector:
+
+* document-level security
+* incremental syncs
+* advanced sync rules
+* basic sync rules
+
+Normally, the running connector service automatically manages these features.
+However, you can use this API to override the default behavior.
+
+To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+This service runs automatically on Elastic Cloud for Elastic managed connectors.
+```ts
+client.connector.updateFeatures({ connector_id, features })
+```
+
+### Arguments [_arguments_connector.update_features]
+
+#### Request (object) [_request_connector.update_features]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated.
+- **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })**
+
+## client.connector.updateFiltering [_connector.update_filtering]
+Update the connector filtering.
+
+Update the draft filtering configuration of a connector and mark the draft validation state as edited.
+The filtering draft is activated once validated by the running Elastic connector service.
+The filtering property is used to configure sync rules (both basic and advanced) for a connector.
+```ts +client.connector.updateFiltering({ connector_id }) +``` + +### Arguments [_arguments_connector.update_filtering] + +#### Request (object) [_request_connector.update_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`filtering` (Optional, { active, domain, draft }[])** +- **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** +- **`advanced_snippet` (Optional, { created_at, updated_at, value })** + +## client.connector.updateFilteringValidation [_connector.update_filtering_validation] +Update the connector draft filtering validation. + +Update the draft filtering validation info for a connector. +```ts +client.connector.updateFilteringValidation({ connector_id, validation }) +``` + +### Arguments [_arguments_connector.update_filtering_validation] + +#### Request (object) [_request_connector.update_filtering_validation] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`validation` ({ errors, state })** + +## client.connector.updateIndexName [_connector.update_index_name] +Update the connector index name. + +Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. +```ts +client.connector.updateIndexName({ connector_id, index_name }) +``` + +### Arguments [_arguments_connector.update_index_name] + +#### Request (object) [_request_connector.update_index_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`index_name` (T \| null)** + +## client.connector.updateName [_connector.update_name] +Update the connector name and description. +```ts +client.connector.updateName({ connector_id }) +``` + +### Arguments [_arguments_connector.update_name] + +#### Request (object) [_request_connector.update_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`name` (Optional, string)** +- **`description` (Optional, string)** + +## client.connector.updateNative [_connector.update_native] +Update the connector is_native flag. +```ts +client.connector.updateNative({ connector_id, is_native }) +``` + +### Arguments [_arguments_connector.update_native] + +#### Request (object) [_request_connector.update_native] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`is_native` (boolean)** + +## client.connector.updatePipeline [_connector.update_pipeline] +Update the connector pipeline. + +When you create a new connector, the configuration of an ingest pipeline is populated with default settings. +```ts +client.connector.updatePipeline({ connector_id, pipeline }) +``` + +### Arguments [_arguments_connector.update_pipeline] + +#### Request (object) [_request_connector.update_pipeline] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** + +## client.connector.updateScheduling [_connector.update_scheduling] +Update the connector scheduling. 
+```ts
+client.connector.updateScheduling({ connector_id, scheduling })
+```
+
+### Arguments [_arguments_connector.update_scheduling]
+
+#### Request (object) [_request_connector.update_scheduling]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`scheduling` ({ access_control, full, incremental })**
+
+## client.connector.updateServiceType [_connector.update_service_type]
+Update the connector service type.
+```ts
+client.connector.updateServiceType({ connector_id, service_type })
+```
+
+### Arguments [_arguments_connector.update_service_type]
+
+#### Request (object) [_request_connector.update_service_type]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`service_type` (string)**
+
+## client.connector.updateStatus [_connector.update_status]
+Update the connector status.
+```ts
+client.connector.updateStatus({ connector_id, status })
+```
+
+### Arguments [_arguments_connector.update_status]
+
+#### Request (object) [_request_connector.update_status]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`status` (Enum("created" \| "needs_configuration" \| "configured" \| "connected" \| "error"))**
+
+## client.danglingIndices.deleteDanglingIndex [_dangling_indices.delete_dangling_index]
+Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+```ts
+client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
+```
+
+### Arguments [_arguments_dangling_indices.delete_dangling_index]
+
+#### Request (object) [_request_dangling_indices.delete_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout
+
+## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index]
+Import a dangling index.
+
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+```ts
+client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
+```
+
+### Arguments [_arguments_dangling_indices.import_dangling_index]
+
+#### Request (object) [_request_dangling_indices.import_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index.
+Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout + +## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices] +Get the dangling indices. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +Use this API to list dangling indices, which you can then import or delete. +```ts +client.danglingIndices.listDanglingIndices() +``` + + +## client.enrich.deletePolicy [_enrich.delete_policy] +Delete an enrich policy. +Deletes an existing enrich policy and its enrich index. +```ts +client.enrich.deletePolicy({ name }) +``` + +### Arguments [_arguments_enrich.delete_policy] + +#### Request (object) [_request_enrich.delete_policy] +- **`name` (string)**: Enrich policy to delete. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.executePolicy [_enrich.execute_policy] +Run an enrich policy. +Create the enrich index for an existing enrich policy. +```ts +client.enrich.executePolicy({ name }) +``` + +### Arguments [_arguments_enrich.execute_policy] + +#### Request (object) [_request_enrich.execute_policy] +- **`name` (string)**: Enrich policy to execute. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. + +## client.enrich.getPolicy [_enrich.get_policy] +Get an enrich policy. +Returns information about an enrich policy. +```ts +client.enrich.getPolicy({ ... }) +``` + +### Arguments [_arguments_enrich.get_policy] + +#### Request (object) [_request_enrich.get_policy] +- **`name` (Optional, string \| string[])**: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.putPolicy [_enrich.put_policy] +Create an enrich policy. +Creates an enrich policy. +```ts +client.enrich.putPolicy({ name }) +``` + +### Arguments [_arguments_enrich.put_policy] + +#### Request (object) [_request_enrich.put_policy] +- **`name` (string)**: Name of the enrich policy to create or update. +- **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. +- **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. +- **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.stats [_enrich.stats] +Get enrich stats. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. +```ts +client.enrich.stats({ ... 
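+  // all parameters are optional; for example: master_timeout: '30s'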
}) +``` + +### Arguments [_arguments_enrich.stats] + +#### Request (object) [_request_enrich.stats] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.eql.delete [_eql.delete] +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete) + +```ts +client.eql.delete({ id }) +``` + +### Arguments [_arguments_eql.delete] + +#### Request (object) [_request_eql.delete] +- **`id` (string)**: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. + +## client.eql.get [_eql.get] +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. +```ts +client.eql.get({ id }) +``` + +### Arguments [_arguments_eql.get] + +#### Request (object) [_request_eql.get] +- **`id` (string)**: Identifier for the search. +- **`keep_alive` (Optional, string \| -1 \| 0)**: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. + +## client.eql.getStatus [_eql.get_status] +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. +```ts +client.eql.getStatus({ id }) +``` + +### Arguments [_arguments_eql.get_status] + +#### Request (object) [_request_eql.get_status] +- **`id` (string)**: Identifier for the search. + +## client.eql.search [_eql.search] +Get EQL search results. +Returns search results for an Event Query Language (EQL) query. +EQL assumes each document in a data stream or index corresponds to an event. +```ts +client.eql.search({ index, query }) +``` + +### Arguments [_arguments_eql.search] + +#### Request (object) [_request_eql.search] +- **`index` (string \| string[])**: The name of the index to scope the operation +- **`query` (string)**: EQL query you wish to run. +- **`case_sensitive` (Optional, boolean)** +- **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. +- **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order +- **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" +- **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } \| { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. +- **`keep_alive` (Optional, string \| -1 \| 0)** +- **`keep_on_completion` (Optional, boolean)** +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)** +- **`allow_partial_search_results` (Optional, boolean)**: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using allow_partial_sequence_results +- **`allow_partial_sequence_results` (Optional, boolean)**: This flag applies only to sequences and has effect only if allow_partial_search_results=true. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. +- **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 +- **`fields` (Optional, { field, format, include_unmapped } \| { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. +- **`result_position` (Optional, Enum("tail" \| "head"))** +- **`runtime_mappings` (Optional, Record)** +- **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` +parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the +`max_samples_per_key` parameter. Pipes are not supported for sample queries. +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. + +## client.esql.asyncQuery [_esql.async_query] +Run an async ES|QL query. +Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. + +The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. +```ts +client.esql.asyncQuery({ query }) +``` + +### Arguments [_arguments_esql.async_query] + +#### Request (object) [_request_esql.async_query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. 
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish.
+By default, the request waits for 1 second for the query results.
+If the query completes during this period, results are returned.
+Otherwise, a query ID is returned that can later be used to retrieve the results.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster.
+The default period is five days.
+When this period expires, the query and its results are deleted, even if the query is still ongoing.
+If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
+- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster.
+If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
+- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+If `false`, the query will fail if there are any failures.
+
+To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.
+- **`delimiter` (Optional, string)**: The character to use between values within a CSV row.
+It is valid only for the CSV format.
+- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
+If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
+- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml.
+
+`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response.
+
+For async requests, nothing will be returned if the async query doesn't finish within the timeout.
+The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
+
+## client.esql.asyncQueryDelete [_esql.async_query_delete]
+Delete an async ES|QL query.
+If the query is still running, it is cancelled.
+Otherwise, the stored results are deleted.
+
+If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:
+
+* The authenticated user that submitted the original query request
+* Users with the `cancel_task` cluster privilege
+```ts
+client.esql.asyncQueryDelete({ id })
+```
+
+### Arguments [_arguments_esql.async_query_delete]
+
+#### Request (object) [_request_esql.async_query_delete]
+- **`id` (string)**: The unique identifier of the query.
+A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time.
+A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
+
+## client.esql.asyncQueryGet [_esql.async_query_get]
+Get async ES|QL query results.
+Get the current status and available results or stored results for an ES|QL asynchronous query.
+If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. +```ts +client.esql.asyncQueryGet({ id }) +``` + +### Arguments [_arguments_esql.async_query_get] + +#### Request (object) [_request_esql.async_query_get] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish. +By default, the request waits for complete query results. +If the request completes during the period specified in this parameter, complete query results are returned. +Otherwise, the response returns an `is_running` value of `true` and no results. + +## client.esql.asyncQueryStop [_esql.async_query_stop] +Stop async ES|QL query. + +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. +```ts +client.esql.asyncQueryStop({ id }) +``` + +### Arguments [_arguments_esql.async_query_stop] + +#### Request (object) [_request_esql.async_query_stop] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + +## client.esql.query [_esql.query] +Run an ES|QL query. +Get search results for an ES|QL (Elasticsearch query language) query. +```ts +client.esql.query({ query }) +``` + +### Arguments [_arguments_esql.query] + +#### Request (object) [_request_esql.query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. + +`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. +- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. +- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. +- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. +If `false`, the query will fail if there are any failures. + +To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. + +## client.features.getFeatures [_features.get_features] +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. 
+In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.
+
+The features listed by this API are a combination of built-in features and features defined by plugins.
+In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.
+```ts
+client.features.getFeatures({ ... })
+```
+
+### Arguments [_arguments_features.get_features]
+
+#### Request (object) [_request_features.get_features]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.features.resetFeatures [_features.reset_features]
+Reset the features.
+Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
+
+WARNING: Intended for development and testing use only. Do not reset features on a production cluster.
+
+Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features.
+This deletes all state information stored in system indices.
+
+The response code is HTTP 200 if the state is successfully reset for all features.
+It is HTTP 500 if the reset operation failed for any feature.
+
+Note that select features might provide a way to reset particular system indices.
+Using this API resets all features, both those that are built-in and implemented as plugins.
+
+To list the features that will be affected, use the get features API.
+
+IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
+```ts
+client.features.resetFeatures({ ... })
+```
+
+### Arguments [_arguments_features.reset_features]
+
+#### Request (object) [_request_features.reset_features]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.fleet.globalCheckpoints [_fleet.global_checkpoints]
+Get global checkpoints.
+
+Get the current global checkpoints for an index.
+This API is designed for internal use by the Fleet server project.
+```ts
+client.fleet.globalCheckpoints({ index })
+```
+
+### Arguments [_arguments_fleet.global_checkpoints]
+
+#### Request (object) [_request_fleet.global_checkpoints]
+- **`index` (string \| string)**: A single index or index alias that resolves to a single index.
+- **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
+to advance past the provided `checkpoints`.
+- **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist
+and all primary shards be active. Can only be true when `wait_for_advance` is true.
+- **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
+the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list
+will cause Elasticsearch to immediately return the current global checkpoints.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for global checkpoints to advance past `checkpoints`.
+
+## client.fleet.msearch [_fleet.msearch]
+Run several Fleet searches with a single API request.
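+
+A minimal sketch of a call (the index name and checkpoint value here are hypothetical; see the parameter list below):
+
+```ts
+const response = await client.fleet.msearch({
+  index: 'my-index-000001', // hypothetical alias that resolves to a single index
+  searches: [
+    {}, // search header; an empty object uses the defaults
+    { query: { match_all: {} } }, // search body
+  ],
+  // run the search only after checkpoint 2 is visible for search
+  wait_for_checkpoints: [2],
+})
+```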
+ +The API follows the same structure as the multi search (`_msearch`) API. +However, similar to the fleet search API, it supports the `wait_for_checkpoints` parameter. +```ts +client.fleet.msearch({ ... }) +``` + +### Arguments [_arguments_fleet.msearch] + +#### Request (object) [_request_fleet.msearch] +- **`index` (Optional, string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. +- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. 
When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures.
+If false, returns an error with no partial results.
+Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default.
+
+## client.fleet.search [_fleet.search]
+The purpose of the Fleet search API is to provide a search API where the search will only be executed
+after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
+```ts
+client.fleet.search({ index })
+```
+
+### Arguments [_arguments_fleet.search]
+
+#### Request (object) [_request_fleet.search]
+- **`index` (string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
+- **`aggregations` (Optional, Record)**
+- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**
+- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit.
+- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
+- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000
+hits using the from and size parameters. To page through more hits, use the
+search_after parameter.
+- **`highlight` (Optional, { encoder, fields })**
+- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact
+number of hits is returned at the cost of some performance. If false, the
+response does not include the total number of hits matching the query.
+Defaults to 10,000 hits.
+- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices.
+- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field
+names matching these patterns in the hits.fields property of the response.
+- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are
+not included in search results and results collected by aggregations.
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**
+- **`analyzer` (Optional, string)**
+- **`analyze_wildcard` (Optional, boolean)**
+- **`batched_reduce_size` (Optional, number)**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**
+- **`default_operator` (Optional, Enum("and" \| "or"))**
+- **`df` (Optional, string)**
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**
+- **`ignore_throttled` (Optional, boolean)**
+- **`ignore_unavailable` (Optional, boolean)**
+- **`lenient` (Optional, boolean)**
+- **`max_concurrent_shard_requests` (Optional, number)**
+- **`min_compatible_shard_node` (Optional, string)**
+- **`preference` (Optional, string)**
+- **`pre_filter_shard_size` (Optional, number)**
+- **`request_cache` (Optional, boolean)**
+- **`routing` (Optional, string)**
+- **`scroll` (Optional, string \| -1 \| 0)**
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**
+- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**
+- **`suggest_size` (Optional, number)**
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
+- **`typed_keys` (Optional, boolean)**
+- **`rest_total_hits_as_int` (Optional, boolean)**
+- **`_source_excludes` (Optional, string \| string[])**
+- **`_source_includes` (Optional, string \| string[])**
+- **`q` (Optional, string)**
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns
+an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
+which is true by default.
+
+## client.graph.explore [_graph.explore]
+Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
+The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
+An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
+Subsequent requests enable you to spider out from one or more vertices of interest.
+You can exclude vertices that have already been returned.
+```ts
+client.graph.explore({ index })
+```
+
+### Arguments [_arguments_graph.explore]
+
+#### Request (object) [_request_graph.explore]
+- **`index` (string \| string[])**: Name of the index.
+- **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+- **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+- **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
+
+## client.ilm.deleteLifecycle [_ilm.delete_lifecycle]
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+```ts
+client.ilm.deleteLifecycle({ policy })
+```
+
+### Arguments [_arguments_ilm.delete_lifecycle]
+
+#### Request (object) [_request_ilm.delete_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.explainLifecycle [_ilm.explain_lifecycle]
+Explain the lifecycle state.
+Get the current lifecycle status for one or more indices.
+For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
+
+The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
+```ts
+client.ilm.explainLifecycle({ index })
+```
+
+### Arguments [_arguments_ilm.explain_lifecycle]
+
+#### Request (object) [_request_ilm.explain_lifecycle]
+- **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
+To target all data streams and indices, use `*` or `_all`.
+- **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
+- **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.getLifecycle [_ilm.get_lifecycle]
+Get lifecycle policies.
+```ts
+client.ilm.getLifecycle({ ... })
+```
+
+### Arguments [_arguments_ilm.get_lifecycle]
+
+#### Request (object) [_request_ilm.get_lifecycle]
+- **`policy` (Optional, string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.getStatus [_ilm.get_status]
+Get the ILM status.
+
+Get the current index lifecycle management status.
+```ts
+client.ilm.getStatus()
+```
+
+
+## client.ilm.migrateToDataTiers [_ilm.migrate_to_data_tiers]
+Migrate to data tiers routing.
+Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers.
+Optionally, delete one legacy index template.
+Using node roles enables ILM to automatically move the indices between data tiers.
+
+Migrating away from custom node attributes routing can be performed manually.
+This API provides an automated way of performing three out of the four manual steps listed in the migration guide:
+
+1. Stop setting the custom hot attribute on new indices.
+1. Remove custom allocation settings from existing ILM policies.
+1. Replace custom allocation settings from existing indices with the corresponding tier preference.
+
+ILM must be stopped before performing the migration.
+Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
+```ts
+client.ilm.migrateToDataTiers({ ... })
+```
+
+### Arguments [_arguments_ilm.migrate_to_data_tiers]
+
+#### Request (object) [_request_ilm.migrate_to_data_tiers]
+- **`legacy_template_to_delete` (Optional, string)**
+- **`node_attribute` (Optional, string)**
+- **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.
+This provides a way to retrieve the indices and ILM policies that need to be migrated.
+
+## client.ilm.moveToStep [_ilm.move_to_step]
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.
+
+WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed.
This is a potentially destructive action and this should be considered an expert level API.
+
+You must specify both the current step and the step to be executed in the body of the request.
+The request will fail if the current step does not match the step currently running for the index.
+This is to prevent the index from being moved from an unexpected step into the next step.
+
+When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
+If only the phase is specified, the index will move to the first step of the first action in the target phase.
+If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
+Only actions specified in the ILM policy are considered valid.
+An index cannot move to a step that is not part of its policy.
+```ts
+client.ilm.moveToStep({ index, current_step, next_step })
+```
+
+### Arguments [_arguments_ilm.move_to_step]
+
+#### Request (object) [_request_ilm.move_to_step]
+- **`index` (string)**: The name of the index whose lifecycle step is to change
+- **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
+- **`next_step` ({ action, name, phase })**: The step that you want to run.
+
+## client.ilm.putLifecycle [_ilm.put_lifecycle]
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
+
+NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
+```ts
+client.ilm.putLifecycle({ policy })
+```
+
+### Arguments [_arguments_ilm.put_lifecycle]
+
+#### Request (object) [_request_ilm.put_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.removePolicy [_ilm.remove_policy]
+Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices.
+It also stops managing the indices.
+```ts
+client.ilm.removePolicy({ index })
+```
+
+### Arguments [_arguments_ilm.remove_policy]
+
+#### Request (object) [_request_ilm.remove_policy]
+- **`index` (string)**: The name of the index to remove the policy from
+
+## client.ilm.retry [_ilm.retry]
+Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step.
+The API sets the policy back to the step where the error occurred and runs the step.
+Use the explain lifecycle state API to determine whether an index is in the ERROR step.
+```ts
+client.ilm.retry({ index })
+```
+
+### Arguments [_arguments_ilm.retry]
+
+#### Request (object) [_request_ilm.retry]
+- **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retried
+
+## client.ilm.start [_ilm.start]
+Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped.
+ILM is started automatically when the cluster is formed.
+Restarting ILM is necessary only when it has been stopped using the stop ILM API.
+```ts
+client.ilm.start({ ...
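+  // all parameters are optional; for example: master_timeout: '30s', timeout: '30s'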
}) +``` + +### Arguments [_arguments_ilm.start] + +#### Request (object) [_request_ilm.start] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ilm.stop [_ilm.stop] +Stop the ILM plugin. +Halt all lifecycle management operations and stop the index lifecycle management plugin. +This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. + +The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. +Use the get ILM status API to check whether ILM is running. +```ts +client.ilm.stop({ ... }) +``` + +### Arguments [_arguments_ilm.stop] + +#### Request (object) [_request_ilm.stop] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.addBlock [_indices.add_block] +Add an index block. + +Add an index block to an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. +```ts +client.indices.addBlock({ index, block }) +``` + +### Arguments [_arguments_indices.add_block] + +#### Request (object) [_request_indices.add_block] +- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are adding blocks to. +To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to add to the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. 
+It can also be set to `-1` to indicate that the request should never time out.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+It can also be set to `-1` to indicate that the request should never time out.
+
+## client.indices.analyze [_indices.analyze]
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.
+
+Generating an excessive amount of tokens may cause a node to run out of memory.
+The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.
+If more tokens than this limit are generated, an error occurs.
+The `_analyze` endpoint without a specified index will always use `10000` as its limit.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-analyze)
+
+```ts
+client.indices.analyze({ ... })
+```
+
+### Arguments [_arguments_indices.analyze]
+
+#### Request (object) [_request_indices.analyze]
+- **`index` (Optional, string)**: Index used to derive the analyzer.
+If specified, the `analyzer` or `field` parameter overrides this value.
+If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
+- **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`.
+This could be a built-in analyzer, or an analyzer that’s been configured in the index.
+- **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter.
+- **`char_filter` (Optional, string \| { type, escaped_tags } \| { type, mappings, mappings_path } \| { type, flags, pattern, replacement } \| { type, mode, name, unicode_set_filter } \| { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
+- **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details.
+- **`field` (Optional, string)**: Field used to derive the analyzer.
+To use this parameter, you must specify an index.
+If specified, the `analyzer` parameter overrides this value.
+- **`filter` (Optional, string \| { type } \| { type } \| { type } \| { type, preserve_original } \| { type } \| { type } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type } \| { type, delimiter, encoding } \| { type } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, flags, pattern, replacement } \| { type } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. +- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. +- **`text` (Optional, string \| string[])**: Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. +- **`tokenizer` (Optional, string \| { type, tokenize_on_chars, max_token_length } \| { type, max_token_length } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size } \| { type } \| { type } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size, delimiter, replacement, reverse, skip } \| { type, flags, group, pattern } \| { type, pattern } \| { type, pattern } \| { type, max_token_length } \| { type } \| { type, max_token_length } \| { type, max_token_length } \| { type, rule_files } \| { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } \| { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. + +## client.indices.cancelMigrateReindex [_indices.cancel_migrate_reindex] +Cancel a migration reindex operation. + +Cancel a migration reindex attempt for a data stream or index. 
+```ts +client.indices.cancelMigrateReindex({ index }) +``` + +### Arguments [_arguments_indices.cancel_migrate_reindex] + +#### Request (object) [_request_indices.cancel_migrate_reindex] +- **`index` (string \| string[])**: The index or data stream name + +## client.indices.clearCache [_indices.clear_cache] +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. + +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. +```ts +client.indices.clearCache({ ... }) +``` + +### Arguments [_arguments_indices.clear_cache] + +#### Request (object) [_request_indices.clear_cache] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. +Use the `fields` parameter to clear the cache of specific fields only. +- **`fields` (Optional, string \| string[])**: List of field names used to limit the `fielddata` parameter. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`query` (Optional, boolean)**: If `true`, clears the query cache. +- **`request` (Optional, boolean)**: If `true`, clears the request cache. + +## client.indices.clone [_indices.clone] +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. 
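+
+For example, a minimal clone sketch might look like this (the index names are hypothetical, and the write block is one way to satisfy the read-only requirement described below):
+
+```ts
+// Hypothetical index names. The source index must be read-only before it can
+// be cloned; adding a `write` block is one way to achieve that.
+await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
+await client.indices.clone({
+  index: 'my-source-index',
+  target: 'my-cloned-index',
+  settings: { 'index.number_of_replicas': 1 }
+})
+```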
+ +IMPORTANT: Indices can only be cloned if they meet the following requirements: + +* The index must be marked as read-only and have a cluster health status of green. +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. + +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. + +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. + +**Monitor the cloning process** + +The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. + +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. + +**Wait for active shards** + +Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. +```ts +client.indices.clone({ index, target }) +``` + +### Arguments [_arguments_indices.clone] + +#### Request (object) [_request_indices.clone] +- **`index` (string)**: Name of the source index to clone. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: Aliases for the resulting index. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.close [_indices.close] +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. + +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. 
+The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices.
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+```ts
+client.indices.close({ index })
+```
+
+### Arguments [_arguments_indices.close]
+
+#### Request (object) [_request_indices.close]
+- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.create [_indices.create]
+Create an index.
+You can use the create index API to add a new index to an Elasticsearch cluster.
+When creating an index, you can specify the following:
+
+* Settings for the index.
+* Mappings for fields in the index.
+* Index aliases.
+
+**Wait for active shards**
+
+By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.
+The index creation response will indicate what happened.
+For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.
+Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.
+These values simply indicate whether the operation completed before the timeout.
+If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. +If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + +You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. +Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. +```ts +client.indices.create({ index }) +``` + +### Arguments [_arguments_indices.create] + +#### Request (object) [_request_indices.create] +- **`index` (string)**: Name of the index you wish to create. +- **`aliases` (Optional, Record)**: Aliases for the index. +- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include: +- Field names +- Field data types +- Mapping parameters +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.createDataStream [_indices.create_data_stream] +Create a data stream. + +You must have a matching index template with data stream enabled. 
+```ts +client.indices.createDataStream({ name }) +``` + +### Arguments [_arguments_indices.create_data_stream] + +#### Request (object) [_request_indices.create_data_stream] +- **`name` (string)**: Name of the data stream, which must meet the following criteria: +Lowercase only; +Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; +Cannot start with `-`, `_`, `+`, or `.ds-`; +Cannot be `.` or `..`; +Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.createFrom [_indices.create_from] +Create an index from a source index. + +Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. +```ts +client.indices.createFrom({ source, dest }) +``` + +### Arguments [_arguments_indices.create_from] + +#### Request (object) [_request_indices.create_from] +- **`source` (string)**: The source index or data stream name +- **`dest` (string)**: The destination index or data stream name +- **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** + +## client.indices.dataStreamsStats [_indices.data_streams_stats] +Get data stream stats. + +Get statistics for one or more data streams. +```ts +client.indices.dataStreamsStats({ ... }) +``` + +### Arguments [_arguments_indices.data_streams_stats] + +#### Request (object) [_request_indices.data_streams_stats] +- **`name` (Optional, string)**: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. + +## client.indices.delete [_indices.delete] +Delete indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. +```ts +client.indices.delete({ index }) +``` + +### Arguments [_arguments_indices.delete] + +#### Request (object) [_request_indices.delete] +- **`index` (string \| string[])**: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. 
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.deleteAlias [_indices.delete_alias]
+Delete an alias.
+Removes a data stream or index from an alias.
+```ts
+client.indices.deleteAlias({ index, name })
+```
+
+### Arguments [_arguments_indices.delete_alias]
+
+#### Request (object) [_request_indices.delete_alias]
+- **`index` (string \| string[])**: List of data streams or indices used to limit the request.
+Supports wildcards (`*`).
+- **`name` (string \| string[])**: List of aliases to remove.
+Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.deleteDataLifecycle [_indices.delete_data_lifecycle]
+Delete data stream lifecycles.
+Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
+```ts
+client.indices.deleteDataLifecycle({ name })
+```
+
+### Arguments [_arguments_indices.delete_data_lifecycle]
+
+#### Request (object) [_request_indices.delete_data_lifecycle]
+- **`name` (string \| string[])**: A list of data streams for which the data stream lifecycle will be deleted; use `*` to target all data streams
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit timeout for the request
+
+## client.indices.deleteDataStream [_indices.delete_data_stream]
+Delete data streams.
+Deletes one or more data streams and their backing indices.
+```ts
+client.indices.deleteDataStream({ name })
+```
+
+### Arguments [_arguments_indices.delete_data_stream]
+
+#### Request (object) [_request_indices.delete_data_stream]
+- **`name` (string \| string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
+
+## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options]
+Deletes the data stream options of the selected data streams.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
+
+```ts
+client.indices.deleteDataStreamOptions()
+```
+
+
+## client.indices.deleteIndexTemplate [_indices.delete_index_template]
+Delete an index template.
+The provided name may contain multiple template names separated by a comma. If multiple template
+names are specified, there is no wildcard support and the provided names must completely match
+existing templates.
+```ts
+client.indices.deleteIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.delete_index_template]
+
+#### Request (object) [_request_indices.delete_index_template]
+- **`name` (string \| string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.deleteTemplate [_indices.delete_template]
+Delete a legacy index template.
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+```ts
+client.indices.deleteTemplate({ name })
+```
+
+### Arguments [_arguments_indices.delete_template]
+
+#### Request (object) [_request_indices.delete_template]
+- **`name` (string)**: The name of the legacy index template to delete.
+Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.diskUsage [_indices.disk_usage]
+Analyze the index disk usage.
+Analyze the disk usage of each field of an index or data stream.
+This API might not support indices created in previous Elasticsearch versions.
+The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
+
+NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
+Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
+The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.
+```ts +client.indices.diskUsage({ index }) +``` + +### Arguments [_arguments_indices.disk_usage] + +#### Request (object) [_request_indices.disk_usage] +- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request. +It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. +If `false`, the response may not include uncommitted data. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. +To use the API, this parameter must be set to `true`. + +## client.indices.downsample [_indices.downsample] +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). +```ts +client.indices.downsample({ index, target_index }) +``` + +### Arguments [_arguments_indices.downsample] + +#### Request (object) [_request_indices.downsample] +- **`index` (string)**: Name of the time series index to downsample. +- **`target_index` (string)**: Name of the index to create. +- **`config` (Optional, { fixed_interval })** + +## client.indices.exists [_indices.exists] +Check indices. +Check if one or more indices, index aliases, or data streams exist. +```ts +client.indices.exists({ index }) +``` + +### Arguments [_arguments_indices.exists] + +#### Request (object) [_request_indices.exists] +- **`index` (string \| string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. 
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + +## client.indices.existsAlias [_indices.exists_alias] +Check aliases. + +Check if one or more data stream or index aliases exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias) + +```ts +client.indices.existsAlias({ name }) +``` + +### Arguments [_arguments_indices.exists_alias] + +#### Request (object) [_request_indices.exists_alias] +- **`name` (string \| string[])**: List of aliases to check. Supports wildcards (`*`). +- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + +## client.indices.existsIndexTemplate [_indices.exists_index_template] +Check index templates. + +Check whether index templates exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template) + +```ts +client.indices.existsIndexTemplate({ name }) +``` + +### Arguments [_arguments_indices.exists_index_template] + +#### Request (object) [_request_indices.exists_index_template] +- **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.existsTemplate [_indices.exists_template] +Check existence of index templates. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. 
+ +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. +```ts +client.indices.existsTemplate({ name }) +``` + +### Arguments [_arguments_indices.exists_template] + +#### Request (object) [_request_indices.exists_template] +- **`name` (string \| string[])**: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. +- **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.indices.explainDataLifecycle [_indices.explain_data_lifecycle] +Get the status for a data stream lifecycle. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. +```ts +client.indices.explainDataLifecycle({ index }) +``` + +### Arguments [_arguments_indices.explain_data_lifecycle] + +#### Request (object) [_request_indices.explain_data_lifecycle] +- **`index` (string \| string[])**: The name of the index to explain +- **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle +- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master + +## client.indices.fieldUsageStats [_indices.field_usage_stats] +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. +```ts +client.indices.fieldUsageStats({ index }) +``` + +### Arguments [_arguments_indices.field_usage_stats] + +#### Request (object) [_request_indices.field_usage_stats] +- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. 
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. + +## client.indices.flush [_indices.flush] +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. + +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. +```ts +client.indices.flush({ ... }) +``` + +### Arguments [_arguments_indices.flush] + +#### Request (object) [_request_indices.flush] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to flush. +Supports wildcards (`*`). +To flush all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. +If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. + +## client.indices.forcemerge [_indices.forcemerge] +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. 
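+
+For example, a minimal sketch of a manually triggered merge (the index name is hypothetical) might look like this:
+
+```ts
+// Hypothetical index name. Merges the shards of an index that no longer
+// receives writes down to a single segment; see the warning below.
+await client.indices.forcemerge({ index: 'my-old-index', max_num_segments: 1 })
+```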
+
+WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).
+When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone".
+These soft-deleted documents are automatically cleaned up during regular segment merges.
+But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.
+So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.
+If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.
+
+**Blocks during a force merge**
+
+Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`).
+If the client connection is lost before completion then the force merge process will continue in the background.
+Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
+
+**Running force merge asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
+However, you cannot cancel this task as the force merge task is not cancelable.
+Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`.
+When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+**Force merging multiple indices**
+
+You can force merge multiple indices with a single request by targeting:
+
+* One or more data streams that contain multiple backing indices
+* Multiple indices
+* One or more aliases
+* All data streams and indices in a cluster
+
+Each targeted shard is force-merged separately using the `force_merge` threadpool.
+By default, each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time.
+If you expand the `force_merge` threadpool on a node, it will force merge its shards in parallel.
+
+Force merge temporarily increases the storage used by the shard being merged, as it may require free space of up to triple the shard's size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
+
+**Data streams and time-based indices**
+
+Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
+In these cases, each index only receives indexing traffic for a certain period of time.
+Once an index receives no more writes, its shards can be force-merged to a single segment.
+This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
+For example:
+
+```
+POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+```
+```ts
+client.indices.forcemerge({ ... })
+```
+
+### Arguments [_arguments_indices.forcemerge]
+
+#### Request (object) [_request_indices.forcemerge]
+- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) +- **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents +- **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed. + +## client.indices.get [_indices.get] +Get index information. +Get information about one or more indices. For data streams, the API returns information about the +stream’s backing indices. +```ts +client.indices.get({ index }) +``` + +### Arguments [_arguments_indices.get] + +#### Request (object) [_request_indices.get] +- **`index` (string \| string[])**: List of data streams, indices, and index aliases used to limit the request. +Wildcard expressions (*) are supported. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. +- **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`features` (Optional, { name, description } \| { name, description }[])**: Return only information on specified index features + +## client.indices.getAlias [_indices.get_alias] +Get aliases. +Retrieves information for one or more data stream or index aliases. +```ts +client.indices.getAlias({ ... }) +``` + +### Arguments [_arguments_indices.get_alias] + +#### Request (object) [_request_indices.get_alias] +- **`name` (Optional, string \| string[])**: List of aliases to retrieve. +Supports wildcards (`*`). +To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request. 
+Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + +## client.indices.getDataLifecycle [_indices.get_data_lifecycle] +Get data stream lifecycles. + +Get the data stream lifecycle configuration of one or more data streams. +```ts +client.indices.getDataLifecycle({ name }) +``` + +### Arguments [_arguments_indices.get_data_lifecycle] + +#### Request (object) [_request_indices.get_data_lifecycle] +- **`name` (string \| string[])**: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getDataLifecycleStats [_indices.get_data_lifecycle_stats] +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. +```ts +client.indices.getDataLifecycleStats() +``` + + +## client.indices.getDataStream [_indices.get_data_stream] +Get data streams. + +Get information about one or more data streams. +```ts +client.indices.getDataStream({ ... }) +``` + +### Arguments [_arguments_indices.get_data_stream] + +#### Request (object) [_request_indices.get_data_stream] +- **`name` (Optional, string \| string[])**: List of data stream names used to limit the request. +Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. 
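+
+For example, a minimal sketch (the data stream name pattern is hypothetical):
+
+```ts
+// Hypothetical wildcard pattern; matching streams are returned in the
+// `data_streams` array of the response.
+const response = await client.indices.getDataStream({
+  name: 'logs-*',
+  expand_wildcards: 'open'
+})
+console.log(response.data_streams.map(ds => ds.name))
+```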
+
+## client.indices.getDataStreamOptions [_indices.get_data_stream_options]
+Get the data stream options of the selected data streams.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
+
+```ts
+client.indices.getDataStreamOptions()
+```
+
+
+## client.indices.getDataStreamSettings [_indices.get_data_stream_settings]
+Get a data stream's settings.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html)
+
+```ts
+client.indices.getDataStreamSettings()
+```
+
+
+## client.indices.getFieldMapping [_indices.get_field_mapping]
+Get mapping definitions.
+Retrieves mapping definitions for one or more fields.
+For data streams, the API retrieves field mappings for the stream’s backing indices.
+
+This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
+```ts
+client.indices.getFieldMapping({ fields })
+```
+
+### Arguments [_arguments_indices.get_field_mapping]
+
+#### Request (object) [_request_indices.get_field_mapping]
+- **`fields` (string \| string[])**: List or wildcard expression of fields used to limit returned information.
+Supports wildcards (`*`).
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+
+## client.indices.getIndexTemplate [_indices.get_index_template]
+Get index templates.
+Get information about one or more index templates.
+```ts
+client.indices.getIndexTemplate({ ... })
+```
+
+### Arguments [_arguments_indices.get_index_template]
+
+#### Request (object) [_request_indices.get_index_template]
+- **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.
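+
+As a minimal sketch, the matching templates can be listed like this (the node URL and template name pattern are illustrative assumptions):
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative node URL
+
+async function showTemplates (): Promise<void> {
+  const response = await client.indices.getIndexTemplate({
+    name: 'my-template-*', // hypothetical template name pattern
+    flat_settings: true
+  })
+  for (const item of response.index_templates) {
+    console.log(item.name, item.index_template.index_patterns)
+  }
+}
+
+showTemplates().catch(console.log)
+```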
+ +## client.indices.getMapping [_indices.get_mapping] +Get mapping definitions. +For data streams, the API retrieves mappings for the stream’s backing indices. +```ts +client.indices.getMapping({ ... }) +``` + +### Arguments [_arguments_indices.get_mapping] + +#### Request (object) [_request_indices.get_mapping] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getMigrateReindexStatus [_indices.get_migrate_reindex_status] +Get the migration reindexing status. + +Get the status of a migration reindex attempt for a data stream or index. +```ts +client.indices.getMigrateReindexStatus({ index }) +``` + +### Arguments [_arguments_indices.get_migrate_reindex_status] + +#### Request (object) [_request_indices.get_migrate_reindex_status] +- **`index` (string \| string[])**: The index or data stream name. + +## client.indices.getSettings [_indices.get_settings] +Get index settings. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. +```ts +client.indices.getSettings({ ... }) +``` + +### Arguments [_arguments_indices.get_settings] + +#### Request (object) [_request_indices.get_settings] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +- **`name` (Optional, string \| string[])**: List or wildcard expression of settings to retrieve. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with foo but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. 
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If +`false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. + +## client.indices.getTemplate [_indices.get_template] +Get legacy index templates. +Get information about one or more index templates. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. +```ts +client.indices.getTemplate({ ... }) +``` + +### Arguments [_arguments_indices.get_template] + +#### Request (object) [_request_indices.get_template] +- **`name` (Optional, string \| string[])**: List of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +To return all index templates, omit this parameter or use a value of `_all` or `*`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.migrateReindex [_indices.migrate_reindex] +Reindex legacy backing indices. + +Reindex all legacy backing indices for a data stream. +This operation occurs in a persistent task. +The persistent task ID is returned immediately and the reindexing work is completed in that task. +```ts +client.indices.migrateReindex({ ... }) +``` + +### Arguments [_arguments_indices.migrate_reindex] + +#### Request (object) [_request_indices.migrate_reindex] +- **`reindex` (Optional, { mode, source })** + +## client.indices.migrateToDataStream [_indices.migrate_to_data_stream] +Convert an index alias to a data stream. +Converts an index alias to a data stream. +You must have a matching index template that is data stream enabled. +The alias must meet the following criteria: +The alias must have a write index; +All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; +The alias must not have any filters; +The alias must not use custom routing. +If successful, the request removes the alias and creates a data stream with the same name. +The indices for the alias become hidden backing indices for the stream. +The write index for the alias becomes the write index for the stream. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-migrate-to-data-stream) + +```ts +client.indices.migrateToDataStream({ name }) +``` + +### Arguments [_arguments_indices.migrate_to_data_stream] + +#### Request (object) [_request_indices.migrate_to_data_stream] +- **`name` (string)**: Name of the index alias to convert to a data stream. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.modifyDataStream [_indices.modify_data_stream]
+Update data streams.
+Performs one or more data stream modification actions in a single atomic operation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-modify-data-stream)
+
+```ts
+client.indices.modifyDataStream({ actions })
+```
+
+### Arguments [_arguments_indices.modify_data_stream]
+
+#### Request (object) [_request_indices.modify_data_stream]
+- **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform.
+
+## client.indices.open [_indices.open]
+Open a closed index.
+For data streams, the API opens any closed backing indices.
+
+A closed index is blocked for read/write operations and does not allow all operations that opened indices allow.
+It is not possible to index documents or to search for documents in a closed index.
+This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
+
+When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.
+The shards will then go through the normal recovery process.
+The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices.
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off by using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
+```ts
+client.indices.open({ index })
+```
+
+### Arguments [_arguments_indices.open]
+
+#### Request (object) [_request_indices.open]
+- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+By default, you must explicitly name the indices you are using to limit the request.
+To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false.
+You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.promoteDataStream [_indices.promote_data_stream] +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-promote-data-stream) + +```ts +client.indices.promoteDataStream({ name }) +``` + +### Arguments [_arguments_indices.promote_data_stream] + +#### Request (object) [_request_indices.promote_data_stream] +- **`name` (string)**: The name of the data stream +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putAlias [_indices.put_alias] +Create or update an alias. +Adds a data stream or index to an alias. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-alias) + +```ts +client.indices.putAlias({ index, name }) +``` + +### Arguments [_arguments_indices.put_alias] + +#### Request (object) [_request_indices.put_alias] +- **`index` (string \| string[])**: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +- **`name` (string)**: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. +- **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. +If specified, this overwrites the `routing` value for indexing operations. +Data stream aliases don’t support this parameter. +- **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. +If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. +If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. +Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. +- **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. +Data stream aliases don’t support this parameter. +- **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. +If specified, this overwrites the `routing` value for search operations. +Data stream aliases don’t support this parameter. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putDataLifecycle [_indices.put_data_lifecycle] +Update data stream lifecycles. +Update the data stream lifecycle of the specified data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-data-lifecycle) + +```ts +client.indices.putDataLifecycle({ name }) +``` + +### Arguments [_arguments_indices.put_data_lifecycle] + +#### Request (object) [_request_indices.put_data_lifecycle] +- **`name` (string \| string[])**: List of data streams used to limit the request. +Supports wildcards (`*`). +To target all data streams use `*` or `_all`. +- **`data_retention` (Optional, string \| -1 \| 0)**: If defined, every document added to this data stream will be stored at least for this time frame. +Any time after this duration the document could be deleted. +When empty, every document in this data stream will be stored indefinitely. +- **`downsampling` (Optional, { rounds })**: The downsampling configuration to execute for the managed backing index after rollover. +- **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle +that's disabled (enabled: `false`) will have no effect on the data stream. 
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
+Update the data stream options of the selected data streams.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)
+
+```ts
+client.indices.putDataStreamOptions()
+```
+
+
+## client.indices.putDataStreamSettings [_indices.put_data_stream_settings]
+Update a data stream's settings.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html)
+
+```ts
+client.indices.putDataStreamSettings()
+```
+
+
+## client.indices.putIndexTemplate [_indices.put_index_template]
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
+Index templates are applied during data stream or index creation.
+For data streams, these settings and mappings are applied when the stream's backing indices are created.
+Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
+Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Multiple matching templates**
+
+If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
+
+Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
+
+**Composing aliases, mappings, and settings**
+
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
+Any mappings, settings, or aliases from the parent index template are merged in next.
+Finally, any configuration on the index request itself is merged.
+Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
+If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
+This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
+If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
+If an entry already exists with the same key, then it is overwritten by the new definition.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-put-index-template)
+
+```ts
+client.indices.putIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.put_index_template]
+
+#### Request (object) [_request_indices.put_index_template]
+- **`name` (string)**: Index or template name.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard expressions used to match the names of data streams and indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified, the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+External systems can use these version numbers to simplify template management.
+To unset a version, replace the template without specifying one.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+It may have any contents.
+It is not automatically generated or used by Elasticsearch.
+This user-defined object is stored in the cluster state, so keeping it short is preferable.
+To unset the metadata, replace the template without specifying it.
+- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template
+references a component template that might not exist.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`cause` (Optional, string)**: User-defined reason for creating/updating the index template.
+
+## client.indices.putMapping [_indices.put_mapping]
+Update field mappings.
+Add new fields to an existing data stream or index.
+You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
+For data streams, these changes are applied to all backing indices by default.
+
+**Add multi-fields to an existing field**
+
+Multi-fields let you index the same field in different ways.
+You can use this API to update the `fields` mapping parameter and enable multi-fields for an existing field.
+WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.
+You can populate the new multi-field with the update by query API.
+
+**Change supported mapping parameters for an existing field**
+
+The documentation for each mapping parameter indicates whether you can update it for an existing field using this API.
+For example, you can use the update mapping API to update the `ignore_above` parameter.
+
+**Change the mapping of an existing field**
+
+Except for supported mapping parameters, you can't change the mapping or field type of an existing field.
+Changing an existing field could invalidate data that's already indexed.
+
+If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.
+If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.
+
+**Rename a field**
+
+Renaming a field would invalidate data already indexed under the old field name.
+Instead, add an alias field to create an alternate field name.
+```ts
+client.indices.putMapping({ index })
+```
+
+### Arguments [_arguments_indices.put_mapping]
+
+#### Request (object) [_request_indices.put_mapping]
+- **`index` (string \| string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
+- **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled.
+- **`dynamic` (Optional, Enum("strict" \| "runtime" \| true \| false))**: Controls whether new fields are added dynamically.
+- **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled, new string fields are checked
+against 'dynamic_date_formats' and, if the value matches,
+a new date field is added instead of a string field.
+- **`dynamic_templates` (Optional, Record[])**: Specify dynamic templates for the mapping.
+- **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index.
+- **`_meta` (Optional, Record)**: A mapping type can have custom metadata associated with it. It is
+not used at all by Elasticsearch, but it can be used to store
+application-specific metadata.
+- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields.
+- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include:
+
+- Field name
+- Field data type
+- Mapping parameters
+- **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents.
+- **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the _source field is enabled on the index.
+- **`runtime` (Optional, Record)**: Mapping of runtime fields for the index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target.
+
+## client.indices.putSettings [_indices.put_settings]
+Update index settings.
+Changes dynamic index settings in real time.
+For data streams, index setting changes are applied to all backing indices by default.
+
+To revert a setting to the default value, use a null value.
+The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
+To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
+
+There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
+
+```
+{
+  "number_of_replicas": 1
+}
+```
+
+Or you can use an `index` setting object:
+```
+{
+  "index": {
+    "number_of_replicas": 1
+  }
+}
+```
+
+Or you can use dot notation:
+```
+{
+  "index.number_of_replicas": 1
+}
+```
+
+Or you can embed any of the aforementioned options in a `settings` object. For example:
+
+```
+{
+  "settings": {
+    "index": {
+      "number_of_replicas": 1
+    }
+  }
+}
+```
+
+NOTE: You can only define new analyzers on closed indices.
+To add an analyzer, you must close the index, define the analyzer, and reopen the index.
+You cannot close the write index of a data stream.
+To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.
+Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
+This affects searches and any new data added to the stream after the rollover.
+However, it does not affect the data stream's backing indices or their existing data.
+To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
+```ts
+client.indices.putSettings({ ... })
+```
+
+### Arguments [_arguments_indices.put_settings]
+
+#### Request (object) [_request_indices.put_settings]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
+- **`reopen` (Optional, boolean)**: Whether to close and reopen the index to apply non-dynamic settings.
+If set to `true`, the indices to which the settings are being applied
+will be closed temporarily and then reopened in order to apply the changes.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.indices.putTemplate [_indices.put_template]
+Create or update a legacy index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
+
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+Composable templates always take precedence over legacy templates.
+If no composable template matches a new index, matching legacy templates are applied according to their order.
+
+Index templates are only applied during index creation.
+Changes to index templates do not affect existing indices.
+Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Indices matching multiple templates**
+
+Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index.
+The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.
+NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
+```ts
+client.indices.putTemplate({ name })
+```
+
+### Arguments [_arguments_indices.put_template]
+
+#### Request (object) [_request_indices.put_template]
+- **`name` (string)**: The name of the template.
+- **`aliases` (Optional, Record)**: Aliases for the index.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard expressions used to match the names
+of indices during creation.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+- **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index
+matches multiple templates.
+
+Templates with lower 'order' values are merged first. Templates with higher
+'order' values are merged later, overriding templates with lower values.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index.
+- **`version` (Optional, number)**: Version number used to manage index templates externally. This number
+is not automatically generated by Elasticsearch.
+To unset a version, replace the template without specifying one.
+- **`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an error.
+- **`cause` (Optional, string)**: User-defined reason for creating/updating the index template.
+
+## client.indices.recovery [_indices.recovery]
+Get index recovery information.
+Get information about ongoing and completed shard recoveries for one or more indices.
+For data streams, the API returns information for the stream's backing indices.
+
+All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.
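+
+As a minimal sketch, the recoveries still in flight can be polled like this before the detailed behavior described below (the node URL is an illustrative assumption):
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative node URL
+
+async function showOngoingRecoveries (): Promise<void> {
+  // active_only limits the response to recoveries that are still in flight.
+  const response = await client.indices.recovery({ active_only: true, detailed: true })
+  for (const [indexName, status] of Object.entries(response)) {
+    for (const shard of status.shards) {
+      console.log(indexName, shard.id, shard.type, shard.stage)
+    }
+  }
+}
+
+showOngoingRecoveries().catch(console.log)
+```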
+ +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. + +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. + +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. + +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. +It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. +```ts +client.indices.recovery({ ... }) +``` + +### Arguments [_arguments_indices.recovery] + +#### Request (object) [_request_indices.recovery] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + +## client.indices.refresh [_indices.refresh] +Refresh an index. +A refresh makes recent operations performed on one or more indices available for search. +For data streams, the API runs the refresh operation on the stream’s backing indices. + +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. + +Refresh requests are synchronous and do not return a response until the refresh operation completes. + +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. 
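+
+When an explicit refresh is unavoidable, a minimal sketch looks like this (the node URL and index name are illustrative assumptions):
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative node URL
+
+async function refreshIndex (): Promise<void> {
+  // Make recent writes to a hypothetical index searchable immediately.
+  await client.indices.refresh({ index: 'my-index-000001' })
+}
+
+refreshIndex().catch(console.log)
+```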
+ +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. +```ts +client.indices.refresh({ ... }) +``` + +### Arguments [_arguments_indices.refresh] + +#### Request (object) [_request_indices.refresh] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. + +## client.indices.reloadSearchAnalyzers [_indices.reload_search_analyzers] +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. + +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. +```ts +client.indices.reloadSearchAnalyzers({ index }) +``` + +### Arguments [_arguments_indices.reload_search_analyzers] + +#### Request (object) [_request_indices.reload_search_analyzers] +- **`index` (string \| string[])**: A list of index names to reload analyzers for +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed).
+- **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable.
+
+## client.indices.resolveCluster [_indices.resolve_cluster]
+Resolve the cluster.
+
+Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included.
+If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.
+
+This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.
+
+You use the same index expression with this endpoint as you would for cross-cluster search.
+Index and cluster exclusions are also supported with this endpoint.
+
+For each cluster in the index expression, information is returned about:
+
+* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.
+* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
+* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
+* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
+* Cluster version information, including the Elasticsearch server version.
+
+For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
+Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.
+
+## Note on backwards compatibility
+The ability to query without an index expression was added in version 8.18, so when
+querying remote clusters older than that, the local cluster will send the index
+expression `dummy*` to those remote clusters. Thus, if errors occur, you may see a reference
+to that index expression even though you didn't request it. If it causes a problem, you can
+instead include an index expression like `*:*` to bypass the issue.
+
+## Advantages of using this endpoint before a cross-cluster search
+
+You may want to exclude a cluster or index from a search when:
+
+* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
+* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.
+* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
+* A remote cluster is an older version that does not support the feature you want to use in your search.
+
+## Test availability of remote clusters
+
+The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+The remote cluster may be available, while the local cluster is not currently connected to it.
+
+You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters, for example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+The `connected` field in the response will indicate whether it was successful.
+If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
+```ts
+client.indices.resolveCluster({ ... })
+```
+
+### Arguments [_arguments_indices.resolve_cluster]
+
+#### Request (object) [_request_indices.resolve_cluster]
+- **`name` (Optional, string \| string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
+Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+If no index expression is specified, information about all remote clusters configured on the local cluster
+is returned without doing any index matching.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
+or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for remote clusters to respond.
+If a remote cluster does not respond within this timeout period, the API response
+will show the cluster as not connected and include an error message that the
+request timed out.
+
+The default timeout is unset and the query can take
+as long as the networking layer is configured to wait for remote clusters that are
+not responding (typically 30 seconds).
+
+## client.indices.resolveIndex [_indices.resolve_index]
+Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams.
+Multiple patterns and remote clusters are supported.
+```ts
+client.indices.resolveIndex({ name })
+```
+
+### Arguments [_arguments_indices.resolve_index]
+
+#### Request (object) [_request_indices.resolve_index]
+- **`name` (string \| string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+
+## client.indices.rollover [_indices.rollover]
+Roll over to a new index.
+TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
+
+The rollover API creates a new index for a data stream or index alias.
+The API behavior depends on the rollover target.
+
+**Roll over a data stream**
+
+If you roll over a data stream, the API creates a new write index for the stream.
+The stream's previous write index becomes a regular backing index.
+A rollover also increments the data stream's generation.
+
+**Roll over an index alias with a write index**
+
+TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.
+
+If an index alias points to multiple indices, one of the indices must be a write index.
+The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+The API also sets `is_write_index` to `false` for the previous write index.
+
+**Roll over an index alias with one index**
+
+If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.
+
+NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
+
+**Increment index names for an alias**
+
+When you roll over an index alias, you can specify a name for the new index.
+If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+This number is always six characters and zero-padded, regardless of the previous index's name.
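+
+As a minimal sketch, a conditional rollover of an alias might look like this (the node URL, alias name, and thresholds are illustrative assumptions):
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative node URL
+
+async function rolloverIfNeeded (): Promise<void> {
+  const response = await client.indices.rollover({
+    alias: 'my-alias', // hypothetical alias
+    conditions: { max_docs: 100_000_000, max_primary_shard_size: '50gb' }
+  })
+  // rolled_over is false when no max_* condition was met.
+  console.log(response.rolled_over, response.old_index, response.new_index)
+}
+
+rolloverIfNeeded().catch(console.log)
+```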
+
+If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
+For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.
+If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
+```ts
+client.indices.rollover({ alias })
+```
+
+### Arguments [_arguments_indices.rollover]
+
+#### Request (object) [_request_indices.rollover]
+- **`alias` (string)**: Name of the data stream or index alias to roll over.
+- **`new_index` (Optional, string)**: Name of the index to create.
+Supports date math.
+Data streams do not support this parameter.
+- **`aliases` (Optional, Record)**: Aliases for the target index.
+Data streams do not support this parameter.
+- **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover.
+If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions.
+If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
+If conditions are specified, at least one of them must be a `max_*` condition.
+The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+If specified, this mapping can include field names, field data types, and mapping parameters.
+- **`settings` (Optional, Record)**: Configuration options for the index.
+Data streams do not support this parameter.
+- **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+- **`lazy` (Optional, boolean)**: If set to `true`, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write.
+Only allowed on data streams.
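+
+As a sketch, a conditional rollover of an alias (the alias name and thresholds are illustrative):
+```ts
+// Roll over `my-alias` only if its write index is at least 7 days old or
+// holds at least 1,000 documents; `dry_run: true` would check the conditions
+// without performing the rollover.
+const response = await client.indices.rollover({
+  alias: 'my-alias',
+  conditions: { max_age: '7d', max_docs: 1000 }
+})
+console.log(response.rolled_over, response.new_index)
+```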
+
+## client.indices.segments [_indices.segments]
+Get index segments.
+Get low-level information about the Lucene segments in index shards.
+For data streams, the API returns information about the stream's backing indices.
+```ts
+client.indices.segments({ ... })
+```
+
+### Arguments [_arguments_indices.segments]
+
+#### Request (object) [_request_indices.segments]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`verbose` (Optional, boolean)**: If `true`, the request returns a verbose response.
+
+## client.indices.shardStores [_indices.shard_stores]
+Get index shard stores.
+Get store information about replica shards in one or more indices.
+For data streams, the API retrieves store information for the stream's backing indices.
+
+The index shard stores API returns the following information:
+
+* The node on which each replica shard exists.
+* The allocation ID for each replica shard.
+* A unique ID for each replica shard.
+* Any errors encountered while opening the shard index or from an earlier failure.
+
+By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
+```ts
+client.indices.shardStores({ ... })
+```
+
+### Arguments [_arguments_indices.shard_stores]
+
+#### Request (object) [_request_indices.shard_stores]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all`
+value targets only missing or closed indices. This behavior applies even if the request
+targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams,
+this argument determines whether wildcard expressions match hidden data streams.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`status` (Optional, Enum("green" \| "yellow" \| "red" \| "all") \| Enum("green" \| "yellow" \| "red" \| "all")[])**: List of shard health statuses used to limit the request.
+
+## client.indices.shrink [_indices.shrink]
+Shrink an index.
+Shrink an index into a new index with fewer primary shards.
+
+Before you can shrink an index:
+
+* The index must be read-only.
+* A copy of every shard in the index must reside on the same node.
+* The index must have a green health status.
+
+To make shard allocation easier, we recommend you also remove the index's replica shards.
+You can later re-add replica shards as part of the shrink operation.
+
+The requested number of primary shards in the target index must be a factor of the number of shards in the source index.
+For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1.
+If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.
+Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.
+
+The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.
+
+A shrink operation:
+
+* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
+* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
+* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards according to the `index.routing.allocation.initial_recovery._id` index setting.
+
+IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:
+
+* The target index must not exist.
+* The source index must have more primary shards than the target index.
+* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
+* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
+* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
+```ts
+client.indices.shrink({ index, target })
+```
+
+### Arguments [_arguments_indices.shrink]
+
+#### Request (object) [_request_indices.shrink]
+- **`index` (string)**: Name of the source index to shrink.
+- **`target` (string)**: Name of the target index to create.
+- **`aliases` (Optional, Record)**: The key is the alias name.
+Index alias names support date math.
+- **`settings` (Optional, Record)**: Configuration options for the target index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
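+
+As a sketch, preparing a source index and shrinking it to a single primary shard (the index names, node name, and shard count are illustrative assumptions):
+```ts
+// Make the source index read-only and gather a copy of every shard on one node.
+await client.indices.putSettings({
+  index: 'my-source-index',
+  settings: {
+    'index.number_of_replicas': 0,
+    'index.routing.allocation.require._name': 'shrink-node',
+    'index.blocks.write': true
+  }
+})
+// Shrink into a new index with one primary shard; clearing the allocation
+// requirement lets the target's shards relocate freely afterwards.
+await client.indices.shrink({
+  index: 'my-source-index',
+  target: 'my-target-index',
+  settings: {
+    'index.number_of_shards': 1,
+    'index.routing.allocation.require._name': null
+  }
+})
+```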
+
+## client.indices.simulateIndexTemplate [_indices.simulate_index_template]
+Simulate an index.
+Get the index configuration that would be applied to the specified index from an existing index template.
+```ts
+client.indices.simulateIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.simulate_index_template]
+
+#### Request (object) [_request_indices.simulate_index_template]
+- **`name` (string)**: Name of the index to simulate.
+- **`create` (Optional, boolean)**: Whether the index template that is optionally defined in the request body should be dry-run added only if it is new, or whether it may also replace an existing template with the same name.
+- **`cause` (Optional, string)**: User-defined reason for dry-run creating the new template for simulation purposes.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If `true`, returns all relevant default configurations for the index template.
+
+## client.indices.simulateTemplate [_indices.simulate_template]
+Simulate an index template.
+Get the index configuration that would be applied by a particular index template.
+```ts
+client.indices.simulateTemplate({ ... })
+```
+
+### Arguments [_arguments_indices.simulate_template]
+
+#### Request (object) [_request_indices.simulate_template]
+- **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit
+this parameter and specify the template configuration in the request body.
+- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified, the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+May have any contents.
+This map is not automatically generated by Elasticsearch.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template
+references a component template that might not exist.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, the template passed in the body is only used if no existing templates match the same index patterns. If `false`, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
+- **`cause` (Optional, string)**: User-defined reason for dry-run creating the new template for simulation purposes.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If `true`, returns all relevant default configurations for the index template.
+
+## client.indices.split [_indices.split]
+Split an index.
+Split an index into a new index with more primary shards.
+
+Before you can split an index:
+
+* The index must be read-only.
+* The cluster health status must be green.
+
+You can make an index read-only with the following request using the add index block API:
+
+```
+PUT /my_source_index/_block/write
+```
+
+The current write index on a data stream cannot be split.
+In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.
+
+The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
+The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
+For instance, a 5-shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
+
+A split operation:
+
+* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
+* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
+* Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
+* Recovers the target index as though it were a closed index which had just been re-opened.
+
+IMPORTANT: Indices can only be split if they satisfy the following requirements:
+
+* The target index must not exist.
+* The source index must have fewer primary shards than the target index.
+* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
+* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
+```ts
+client.indices.split({ index, target })
+```
+
+### Arguments [_arguments_indices.split]
+
+#### Request (object) [_request_indices.split]
+- **`index` (string)**: Name of the source index to split.
+- **`target` (string)**: Name of the target index to create.
+- **`aliases` (Optional, Record)**: Aliases for the resulting index.
+- **`settings` (Optional, Record)**: Configuration options for the target index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.stats [_indices.stats]
+Get index statistics.
+For data streams, the API retrieves statistics for the stream's backing indices.
+
+By default, the returned statistics are index-level with `primaries` and `total` aggregations.
+`primaries` are the values for only the primary shards.
+`total` are the accumulated values for both primary and replica shards.
+
+To get shard-level statistics, set the `level` parameter to `shards`.
+
+NOTE: When moving to another node, the shard-level statistics for a shard are cleared.
+Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
+```ts
+client.indices.stats({ ... })
+```
+
+### Arguments [_arguments_indices.stats]
+
+#### Request (object) [_request_indices.stats]
+- **`metric` (Optional, string \| string[])**: Limit the information returned to the specific metrics.
+- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or an empty string to perform the operation on all indices.
+- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams. Supports a list of values,
+such as `open,hidden`.
+- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics.
+- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics.
+- **`forbid_closed_indices` (Optional, boolean)**: If `true`, statistics are not collected from closed indices.
+- **`groups` (Optional, string \| string[])**: List of search groups to include in the search statistics.
+- **`include_segment_file_sizes` (Optional, boolean)**: If `true`, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level.
+
+## client.indices.unfreeze [_indices.unfreeze]
+Unfreeze an index.
+When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
+```ts +client.indices.unfreeze({ index }) +``` + +### Arguments [_arguments_indices.unfreeze] + +#### Request (object) [_request_indices.unfreeze] +- **`index` (string)**: Identifier for the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, string)**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.updateAliases [_indices.update_aliases] +Create or update an alias. +Adds a data stream or index to an alias. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-update-aliases) + +```ts +client.indices.updateAliases({ ... }) +``` + +### Arguments [_arguments_indices.update_aliases] + +#### Request (object) [_request_indices.update_aliases] +- **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.validateQuery [_indices.validate_query] +Validate a query. +Validates a query without running it. +```ts +client.indices.validateQuery({ ... }) +``` + +### Arguments [_arguments_indices.validate_query] + +#### Request (object) [_request_indices.validate_query] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query to validate, defined with the Query DSL.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index.
+- **`analyzer` (Optional, string)**: Analyzer to use for the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `AND` or `OR`.
+- **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
+- **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed.
+- **`q` (Optional, string)**: Query in the Lucene query string syntax.
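+
+For example, a small sketch of validating a Query DSL body (the index and field names are illustrative):
+```ts
+// Validate a query without running it; `explain: true` adds a human-readable
+// explanation, or the error details if the query turns out to be invalid.
+const response = await client.indices.validateQuery({
+  index: 'my-index',
+  explain: true,
+  query: { range: { created_at: { gte: 'now-1d/d' } } }
+})
+console.log(response.valid, response.explanations)
+```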
+
+## client.inference.chatCompletionUnified [_inference.chat_completion_unified]
+Perform chat completion inference
+
+The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
+It only works with the `chat_completion` task type for `openai` and `elastic` inference services.
+
+NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
+The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
+The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
+If you use the `openai`, `hugging_face`, or `elastic` service, use the Chat completion inference API.
+```ts
+client.inference.chatCompletionUnified({ inference_id })
+```
+
+### Arguments [_arguments_inference.chat_completion_unified]
+
+#### Request (object) [_request_inference.chat_completion_unified]
+- **`inference_id` (string)**: The inference Id
+- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })**
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.completion [_inference.completion]
+Perform completion inference on the service
+```ts
+client.inference.completion({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.completion]
+
+#### Request (object) [_request_inference.completion]
+- **`inference_id` (string)**: The inference Id
+- **`input` (string \| string[])**: Inference input.
+Either a string or an array of strings.
+- **`task_settings` (Optional, User-defined value)**: Optional task settings
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.delete [_inference.delete]
+Delete an inference endpoint
+```ts
+client.inference.delete({ inference_id })
+```
+
+### Arguments [_arguments_inference.delete]
+
+#### Request (object) [_request_inference.delete]
+- **`inference_id` (string)**: The inference identifier.
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type
+- **`dry_run` (Optional, boolean)**: When `true`, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned.
+- **`force` (Optional, boolean)**: When `true`, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.
+
+## client.inference.get [_inference.get]
+Get an inference endpoint
+```ts
+client.inference.get({ ... })
+```
+
+### Arguments [_arguments_inference.get]
+
+#### Request (object) [_request_inference.get]
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type
+- **`inference_id` (Optional, string)**: The inference Id
+
+## client.inference.inference [_inference.inference]
+Perform inference on the service.
+
+This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+It returns a response with the results of the tasks.
+The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+
+For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.
+
+> info
+> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+```ts
+client.inference.inference({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.inference]
+
+#### Request (object) [_request_inference.inference]
+- **`inference_id` (string)**: The unique identifier for the inference endpoint.
+- **`input` (string \| string[])**: The text on which you want to perform the inference task.
+It can be a single string or an array.
+
+> info
+> Inference endpoints for the `completion` task type currently only support a single string as input.
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs.
+- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task.
+It is not required for other tasks.
+- **`input_type` (Optional, string)**: Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include:
+* `SEARCH`
+* `INGEST`
+* `CLASSIFICATION`
+* `CLUSTERING`
+Not all services support all values. Unsupported values will trigger a validation exception.
+Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more info.
+
+> info
+> The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request.
+These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
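+
+As an illustration, a minimal sketch of running a text embedding task (the endpoint ID is an assumption; the endpoint must have been created beforehand):
+```ts
+// Run a `text_embedding` task against an existing inference endpoint.
+const response = await client.inference.inference({
+  inference_id: 'my-embedding-endpoint',
+  task_type: 'text_embedding',
+  input: ['The quick brown fox']
+})
+console.log(response)
+```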
+
+## client.inference.put [_inference.put]
+Create an inference endpoint.
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+The following integrations are available through the inference API. You can find the available task types next to the integration name:
+* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
+* Amazon Bedrock (`completion`, `text_embedding`)
+* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)
+* Anthropic (`completion`)
+* Azure AI Studio (`completion`, `text_embedding`)
+* Azure OpenAI (`completion`, `text_embedding`)
+* Cohere (`completion`, `rerank`, `text_embedding`)
+* DeepSeek (`chat_completion`, `completion`)
+* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
+* ELSER (`sparse_embedding`)
+* Google AI Studio (`completion`, `text_embedding`)
+* Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+* JinaAI (`rerank`, `text_embedding`)
+* Llama (`chat_completion`, `completion`, `text_embedding`)
+* Mistral (`chat_completion`, `completion`, `text_embedding`)
+* OpenAI (`chat_completion`, `completion`, `text_embedding`)
+* VoyageAI (`rerank`, `text_embedding`)
+* Watsonx inference integration (`text_embedding`)
+```ts
+client.inference.put({ inference_id })
+```
+
+### Arguments [_arguments_inference.put]
+
+#### Request (object) [_request_inference.put]
+- **`inference_id` (string)**: The inference Id
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type. Refer to the integration list in the API description for the available task types.
+- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })**
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAlibabacloud [_inference.put_alibabacloud]
+Create an AlibabaCloud AI Search inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
+```ts
+client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_alibabacloud]
+
+#### Request (object) [_request_inference.put_alibabacloud]
+- **`task_type` (Enum("completion" \| "rerank" \| "space_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`.
+- **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAmazonbedrock [_inference.put_amazonbedrock]
+Create an Amazon Bedrock inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `amazonbedrock` service.
+
+> info
+> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
+```ts
+client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_amazonbedrock]
+
+#### Request (object) [_request_inference.put_amazonbedrock]
+- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`.
+- **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker]
+Create an Amazon SageMaker inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker)
+
+```ts
+client.inference.putAmazonsagemaker({ task_type, amazonsagemaker_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_amazonsagemaker]
+
+#### Request (object) [_request_inference.put_amazonsagemaker]
+- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion" \| "sparse_embedding" \| "rerank"))**: The type of the inference task that the model will perform.
+- **`amazonsagemaker_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("amazon_sagemaker"))**: The type of service supported for the specified task type. In this case, `amazon_sagemaker`.
+- **`service_settings` ({ access_key, endpoint_name, api, region, secret_key, target_model, target_container_hostname, inference_component_name, batch_size, dimensions })**: Settings used to install the inference model.
+These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { custom_attributes, enable_explanations, inference_id, session_id, target_variant })**: Settings to configure the inference task.
+These settings are specific to the task type and `service_settings.api` you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAnthropic [_inference.put_anthropic]
+Create an Anthropic inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `anthropic` service.
+```ts
+client.inference.putAnthropic({ task_type, anthropic_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_anthropic]
+
+#### Request (object) [_request_inference.put_anthropic]
+- **`task_type` (Enum("completion"))**: The task type.
+The only valid task type for the model to perform is `completion`.
+- **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAzureaistudio [_inference.put_azureaistudio]
+Create an Azure AI Studio inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureaistudio` service.
+```ts
+client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureaistudio]
+
+#### Request (object) [_request_inference.put_azureaistudio]
+- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`.
+- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `azureaistudio` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAzureopenai [_inference.put_azureopenai]
+Create an Azure OpenAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureopenai` service.
+
+The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:
+
+* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)
+* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)
+
+The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
+```ts
+client.inference.putAzureopenai({ task_type, azureopenai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureopenai]
+
+#### Request (object) [_request_inference.put_azureopenai]
+- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
+- **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`.
+- **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putCohere [_inference.put_cohere]
+Create a Cohere inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `cohere` service.
+```ts
+client.inference.putCohere({ task_type, cohere_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_cohere]
+
+#### Request (object) [_request_inference.put_cohere]
+- **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`cohere_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`.
+- **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model.
+These settings are specific to the `cohere` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putCustom [_inference.put_custom]
+Create a custom inference endpoint.
+
+The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations.
+The custom service gives you the ability to define the headers, URL, query parameters, request body, and secrets.
+The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key.
+Templates are portions of a string that start with `${` and end with `}`.
+The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`.
+If the definition (key) is not found for a template, an error message is returned.
+In case of an endpoint definition like the following:
+```
+PUT _inference/text_embedding/test-text-embedding
+{
+  "service": "custom",
+  "service_settings": {
+    "secret_parameters": {
+      "api_key": "<api_key>"
+    },
+    "url": "...endpoints.huggingface.cloud/v1/embeddings",
+    "headers": {
+      "Authorization": "Bearer ${api_key}",
+      "Content-Type": "application/json"
+    },
+    "request": "{\"input\": ${input}}",
+    "response": {
+      "json_parser": {
+        "text_embeddings": "$.data[*].embedding[*]"
+      }
+    }
+  }
+}
+```
+To replace `${api_key}`, the `secret_parameters` and `task_settings` are checked for a key named `api_key`.
+
+> info
+> Templates should not be surrounded by quotes.
+
+Pre-defined templates:
+* `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests.
+* `${input_type}` refers to the input type translation values.
+* `${query}` refers to the query field used specifically for reranking tasks.
+* `${top_n}` refers to the `top_n` field available when performing rerank requests.
+* `${return_documents}` refers to the `return_documents` field available when performing rerank requests.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom)
+
+```ts
+client.inference.putCustom({ task_type, custom_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_custom]
+
+#### Request (object) [_request_inference.put_custom]
+- **`task_type` (Enum("text_embedding" \| "sparse_embedding" \| "rerank" \| "completion"))**: The type of the inference task that the model will perform.
+- **`custom_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("custom"))**: The type of service supported for the specified task type. In this case, `custom`.
+- **`service_settings` ({ headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model.
+These settings are specific to the `custom` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { parameters })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
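+
+For reference, a sketch of the same endpoint definition expressed through the client (the truncated URL and the `<api_key>` placeholder are carried over from the example above):
+```ts
+// Create the custom text_embedding endpoint shown above via the client.
+await client.inference.putCustom({
+  task_type: 'text_embedding',
+  custom_inference_id: 'test-text-embedding',
+  service: 'custom',
+  service_settings: {
+    secret_parameters: { api_key: '<api_key>' },
+    url: '...endpoints.huggingface.cloud/v1/embeddings',
+    headers: {
+      Authorization: 'Bearer ${api_key}',
+      'Content-Type': 'application/json'
+    },
+    request: '{"input": ${input}}',
+    response: {
+      json_parser: { text_embeddings: '$.data[*].embedding[*]' }
+    }
+  }
+})
+```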
+
+## client.inference.putDeepseek [_inference.put_deepseek]
+Create a DeepSeek inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `deepseek` service.
+```ts
+client.inference.putDeepseek({ task_type, deepseek_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_deepseek]
+
+#### Request (object) [_request_inference.put_deepseek]
+- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`deepseek_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`.
+- **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model.
+These settings are specific to the `deepseek` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putElasticsearch [_inference.put_elasticsearch]
+Create an Elasticsearch inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `elasticsearch` service.
+
+> info
+> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.
+
+If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+```ts
+client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_elasticsearch]
+
+#### Request (object) [_request_inference.put_elasticsearch]
+- **`task_type` (Enum("rerank" \| "sparse_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint.
+It must not match the `model_id`.
+- **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`.
+- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putElser [_inference.put_elser]
+Create an ELSER inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `elser` service.
+You can also deploy ELSER by using the Elasticsearch inference integration.
+
+> info
+> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.
+
+The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+```ts
+client.inference.putElser({ task_type, elser_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_elser]
+
+#### Request (object) [_request_inference.put_elser]
+- **`task_type` (Enum("sparse_embedding"))**: The type of the inference task that the model will perform.
+- **`elser_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`.
+- **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
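+
+As a sketch, creating a customized ELSER endpoint (the endpoint ID and allocation numbers are illustrative):
+```ts
+// Create a sparse_embedding endpoint backed by the ELSER model; the model is
+// downloaded and deployed automatically if it is not present yet.
+await client.inference.putElser({
+  task_type: 'sparse_embedding',
+  elser_inference_id: 'my-elser-endpoint',
+  service: 'elser',
+  service_settings: { num_allocations: 1, num_threads: 1 }
+})
+```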
+
+## client.inference.putGooglevertexai [_inference.put_googlevertexai]
+Create a Google Vertex AI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `googlevertexai` service.
+```ts
+client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_googlevertexai]
+
+#### Request (object) [_request_inference.put_googlevertexai]
+- **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`.
+- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putHuggingFace [_inference.put_hugging_face]
+Create a Hugging Face inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `hugging_face` service.
+Supported tasks include: `text_embedding`, `completion`, and `chat_completion`.
+
+To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint.
+Select a model that supports the task you intend to use.
+
+For Elastic's `text_embedding` task:
+The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section.
+After the endpoint has initialized, copy the generated endpoint URL.
+Recommended models for the `text_embedding` task:
+
+* `all-MiniLM-L6-v2`
+* `all-MiniLM-L12-v2`
+* `all-mpnet-base-v2`
+* `e5-base-v2`
+* `e5-small-v2`
+* `multilingual-e5-base`
+* `multilingual-e5-small`
+
+For Elastic's `chat_completion` and `completion` tasks:
+The selected model must support the `Text Generation` task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for `Text Generation`. When creating a dedicated endpoint, select the `Text Generation` task.
+After the endpoint is initialized (for dedicated) or ready (for serverless), ensure that it supports the OpenAI API and that its URL includes the `/v1/chat/completions` path. Then copy the full endpoint URL for use.
+Recommended models for the `chat_completion` and `completion` tasks:
+
+* `Mistral-7B-Instruct-v0.2`
+* `QwQ-32B`
+* `Phi-3-mini-128k-instruct`
+
+For Elastic's `rerank` task:
+The selected model must support the `sentence-ranking` task and expose the OpenAI API.
+Hugging Face currently supports only dedicated (not serverless) endpoints for `Rerank`.
+After the endpoint is initialized, copy the full endpoint URL for use.
+Tested models for the `rerank` task:
+
+* `bge-reranker-base`
+* `jina-reranker-v1-turbo-en-GGUF`
+```ts
+client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_hugging_face]
+
+#### Request (object) [_request_inference.put_hugging_face]
+- **`task_type` (Enum("chat_completion" \| "completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`.
+- **`service_settings` ({ api_key, rate_limit, url, model_id })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putJinaai [_inference.put_jinaai]
+Create a JinaAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `jinaai` service.
+
+To review the available `rerank` and `text_embedding` models, refer to the JinaAI documentation.
+```ts
+client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_jinaai]
+
+#### Request (object) [_request_inference.put_jinaai]
+- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`.
+- **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putMistral [_inference.put_mistral]
+Create a Mistral inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `mistral` service.
+```ts
+client.inference.putMistral({ task_type, mistral_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_mistral]
+
+#### Request (object) [_request_inference.put_mistral]
+- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`.
+- **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
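+
+A minimal sketch (the endpoint ID and model name are placeholders; note this service takes `model`, not `model_id`):
+
+```ts
+await client.inference.putMistral({
+  task_type: 'text_embedding',
+  mistral_inference_id: 'my-mistral-embeddings',
+  service: 'mistral',
+  service_settings: {
+    api_key: process.env.MISTRAL_API_KEY ?? '', // assumed env var
+    model: 'mistral-embed' // illustrative model name
+  }
+})
+```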
+
+## client.inference.putOpenai [_inference.put_openai]
+Create an OpenAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `openai` service or `openai`-compatible APIs.
+```ts
+client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_openai]
+
+#### Request (object) [_request_inference.put_openai]
+- **`task_type` (Enum("chat_completion" \| "completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
+- **`openai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`.
+- **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putVoyageai [_inference.put_voyageai]
+Create a VoyageAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `voyageai` service.
+
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+```ts
+client.inference.putVoyageai({ task_type, voyageai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_voyageai]
+
+#### Request (object) [_request_inference.put_voyageai]
+- **`task_type` (Enum("text_embedding" \| "rerank"))**: The type of the inference task that the model will perform.
+- **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`.
+- **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
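+
+An illustrative sketch (the endpoint ID and model name are placeholders):
+
+```ts
+await client.inference.putVoyageai({
+  task_type: 'text_embedding',
+  voyageai_inference_id: 'my-voyageai-embeddings',
+  service: 'voyageai',
+  service_settings: {
+    model_id: 'voyage-3' // illustrative model name
+  }
+})
+```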
+
+## client.inference.putWatsonx [_inference.put_watsonx]
+Create a Watsonx inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `watsonxai` service.
+You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.
+You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
+```ts
+client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_watsonx]
+
+#### Request (object) [_request_inference.put_watsonx]
+- **`task_type` (Enum("text_embedding"))**: The task type.
+The only valid task type for the model to perform is `text_embedding`.
+- **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`.
+- **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.rerank [_inference.rerank]
+Perform reranking inference on the service
+```ts
+client.inference.rerank({ inference_id, query, input })
+```
+
+### Arguments [_arguments_inference.rerank]
+
+#### Request (object) [_request_inference.rerank]
+- **`inference_id` (string)**: The unique identifier for the inference endpoint.
+- **`query` (string)**: Query input.
+- **`input` (string \| string[])**: The text on which you want to perform the inference task.
+It can be a single string or an array.
+
+> info
+> Inference endpoints for the `completion` task type currently only support a single string as input.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request.
+These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
+
+## client.inference.sparseEmbedding [_inference.sparse_embedding]
+Perform sparse embedding inference on the service
+```ts
+client.inference.sparseEmbedding({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.sparse_embedding]
+
+#### Request (object) [_request_inference.sparse_embedding]
+- **`inference_id` (string)**: The inference ID
+- **`input` (string \| string[])**: Inference input.
+Either a string or an array of strings.
+- **`task_settings` (Optional, User-defined value)**: Optional task settings
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.streamCompletion [_inference.stream_completion]
+Perform streaming inference.
+Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
+This API works only with the completion task type.
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. +```ts +client.inference.streamCompletion({ inference_id, input }) +``` + +### Arguments [_arguments_inference.stream_completion] + +#### Request (object) [_request_inference.stream_completion] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`input` (string \| string[])**: The text on which you want to perform the inference task. +It can be a single string or an array. + +NOTE: Inference endpoints for the completion task type currently only support a single string as input. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. + +## client.inference.textEmbedding [_inference.text_embedding] +Perform text embedding inference on the service +```ts +client.inference.textEmbedding({ inference_id, input }) +``` + +### Arguments [_arguments_inference.text_embedding] + +#### Request (object) [_request_inference.text_embedding] +- **`inference_id` (string)**: The inference Id +- **`input` (string \| string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete. + +## client.inference.update [_inference.update] +Update an inference endpoint. + +Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. +```ts +client.inference.update({ inference_id }) +``` + +### Arguments [_arguments_inference.update] + +#### Request (object) [_request_inference.update] +- **`inference_id` (string)**: The unique identifier of the inference endpoint. +- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs. +- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** + +## client.ingest.deleteGeoipDatabase [_ingest.delete_geoip_database] +Delete GeoIP database configurations. + +Delete one or more IP geolocation database configurations. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database) + +```ts +client.ingest.deleteGeoipDatabase({ id }) +``` + +### Arguments [_arguments_ingest.delete_geoip_database] + +#### Request (object) [_request_ingest.delete_geoip_database] +- **`id` (string \| string[])**: A list of geoip database configurations to delete +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ingest.deleteIpLocationDatabase [_ingest.delete_ip_location_database] +Delete IP geolocation database configurations. +```ts +client.ingest.deleteIpLocationDatabase({ id }) +``` + +### Arguments [_arguments_ingest.delete_ip_location_database] + +#### Request (object) [_request_ingest.delete_ip_location_database] +- **`id` (string \| string[])**: A list of IP location database configurations. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. + +## client.ingest.deletePipeline [_ingest.delete_pipeline] +Delete pipelines. +Delete one or more ingest pipelines. +```ts +client.ingest.deletePipeline({ id }) +``` + +### Arguments [_arguments_ingest.delete_pipeline] + +#### Request (object) [_request_ingest.delete_pipeline] +- **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +To delete all ingest pipelines in a cluster, use a value of `*`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.ingest.geoIpStats [_ingest.geo_ip_stats] +Get GeoIP statistics. +Get download statistics for GeoIP2 databases that are used with the GeoIP processor. +```ts +client.ingest.geoIpStats() +``` + + +## client.ingest.getGeoipDatabase [_ingest.get_geoip_database] +Get GeoIP database configurations. + +Get information about one or more IP geolocation database configurations. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database) + +```ts +client.ingest.getGeoipDatabase({ ... }) +``` + +### Arguments [_arguments_ingest.get_geoip_database] + +#### Request (object) [_request_ingest.get_geoip_database] +- **`id` (Optional, string \| string[])**: A list of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. + +## client.ingest.getIpLocationDatabase [_ingest.get_ip_location_database] +Get IP geolocation database configurations. 
+```ts +client.ingest.getIpLocationDatabase({ ... }) +``` + +### Arguments [_arguments_ingest.get_ip_location_database] + +#### Request (object) [_request_ingest.get_ip_location_database] +- **`id` (Optional, string \| string[])**: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. + +## client.ingest.getPipeline [_ingest.get_pipeline] +Get pipelines. + +Get information about one or more ingest pipelines. +This API returns a local reference of the pipeline. +```ts +client.ingest.getPipeline({ ... }) +``` + +### Arguments [_arguments_ingest.get_pipeline] + +#### Request (object) [_request_ingest.get_pipeline] +- **`id` (Optional, string)**: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) + +## client.ingest.processorGrok [_ingest.processor_grok] +Run a grok processor. +Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. +A grok pattern is like a regular expression that supports aliased expressions that can be reused. +```ts +client.ingest.processorGrok() +``` + + +## client.ingest.putGeoipDatabase [_ingest.put_geoip_database] +Create or update a GeoIP database configuration. + +Refer to the create or update IP geolocation database configuration API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database) + +```ts +client.ingest.putGeoipDatabase({ id, name, maxmind }) +``` + +### Arguments [_arguments_ingest.put_geoip_database] + +#### Request (object) [_request_ingest.put_geoip_database] +- **`id` (string)**: ID of the database configuration to create or update. +- **`name` (string)**: The provider-assigned name of the IP geolocation database to download. +- **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ingest.putIpLocationDatabase [_ingest.put_ip_location_database] +Create or update an IP geolocation database configuration. +```ts +client.ingest.putIpLocationDatabase({ id }) +``` + +### Arguments [_arguments_ingest.put_ip_location_database] + +#### Request (object) [_request_ingest.put_ip_location_database] +- **`id` (string)**: The database configuration identifier. 
+- **`configuration` (Optional, { name, maxmind, ipinfo })**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+A value of `-1` indicates that the request should never time out.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged.
+A value of `-1` indicates that the request should never time out.
+
+## client.ingest.putPipeline [_ingest.put_pipeline]
+Create or update a pipeline.
+Changes made using this API take effect immediately.
+```ts
+client.ingest.putPipeline({ id })
+```
+
+### Arguments [_arguments_ingest.put_pipeline]
+
+#### Request (object) [_request_ingest.put_pipeline]
+- **`id` (string)**: ID of the ingest pipeline to create or update.
+- **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
+- **`description` (Optional, string)**: Description of the ingest pipeline.
+- **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
+- **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.
+- **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.
+- **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated.
+When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates.
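+
+A minimal sketch of a pipeline definition (the pipeline ID and field names are illustrative):
+
+```ts
+await client.ingest.putPipeline({
+  id: 'my-pipeline',
+  description: 'Normalize incoming documents',
+  processors: [
+    // Stamp each document with the ingest timestamp.
+    { set: { field: 'ingested_at', value: '{{_ingest.timestamp}}' } },
+    // Lowercase a field; skip documents where it is missing.
+    { lowercase: { field: 'user.email', ignore_missing: true } }
+  ],
+  on_failure: [
+    // Pipeline-level fallback: record why processing failed.
+    { set: { field: 'error.message', value: '{{_ingest.on_failure_message}}' } }
+  ]
+})
+```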
+
+## client.ingest.simulate [_ingest.simulate]
+Simulate a pipeline.
+
+Run an ingest pipeline against a set of provided documents.
+You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
+```ts
+client.ingest.simulate({ docs })
+```
+
+### Arguments [_arguments_ingest.simulate]
+
+#### Request (object) [_request_ingest.simulate]
+- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline.
+- **`id` (Optional, string)**: The pipeline to test.
+If you don't specify a `pipeline` in the request body, this parameter is required.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: The pipeline to test.
+If you don't specify the `pipeline` request path parameter, this parameter is required.
+If you specify both this and the request path parameter, the API only uses the request path parameter.
+- **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline.
+
+## client.license.delete [_license.delete]
+Delete the license.
+
+When the license expires, your subscription level reverts to Basic.
+
+If the operator privileges feature is enabled, only operator users can use this API.
+```ts
+client.license.delete({ ... })
+```
+
+### Arguments [_arguments_license.delete]
+
+#### Request (object) [_request_license.delete]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.license.get [_license.get]
+Get license information.
+
+Get information about your Elastic license including its type, its status, when it was issued, and when it expires.
+
+> info
+> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.
+> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
+```ts
+client.license.get({ ... })
+```
+
+### Arguments [_arguments_license.get]
+
+#### Request (object) [_request_license.get]
+- **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.
+This parameter is deprecated and will always be set to true in 8.x.
+- **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node.
+
+## client.license.getBasicStatus [_license.get_basic_status]
+Get the basic license status.
+```ts
+client.license.getBasicStatus()
+```
+
+
+## client.license.getTrialStatus [_license.get_trial_status]
+Get the trial status.
+```ts
+client.license.getTrialStatus()
+```
+
+
+## client.license.post [_license.post]
+Update the license.
+
+You can update your license at runtime without shutting down your nodes.
+License updates take effect immediately.
+If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.
+You must then re-submit the API request with the acknowledge parameter set to true.
+
+NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.
+If the operator privileges feature is enabled, only operator users can use this API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-license-post)
+
+```ts
+client.license.post({ ... })
+```
+
+### Arguments [_arguments_license.post]
+
+#### Request (object) [_request_license.post]
+- **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })**
+- **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information.
+- **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.license.postStartBasic [_license.post_start_basic]
+Start a basic license.
+
+Start an indefinite basic license, which gives access to all the basic features.
+
+NOTE: In order to start a basic license, you must not currently have a basic license.
+
+If the basic license does not support all of the features that are available with your current license, however, you are notified in the response.
+You must then re-submit the API request with the `acknowledge` parameter set to `true`.
+
+To check the status of your basic license, use the get basic license API.
+```ts
+client.license.postStartBasic({ ... })
+```
+
+### Arguments [_arguments_license.post_start_basic]
+
+#### Request (object) [_request_license.post_start_basic]
+- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged the acknowledge messages (default: false).
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.license.postStartTrial [_license.post_start_trial]
+Start a trial.
+Start a 30-day trial, which gives access to all subscription features.
+
+NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.
+For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.
+
+To check the status of your trial, use the get trial status API.
+```ts
+client.license.postStartTrial({ ... })
+```
+
+### Arguments [_arguments_license.post_start_trial]
+
+#### Request (object) [_request_license.post_start_trial]
+- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged the acknowledge messages (default: false).
+- **`type` (Optional, string)**: The type of trial license to generate (default: "trial")
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.logstash.deletePipeline [_logstash.delete_pipeline]
+Delete a Logstash pipeline.
+Delete a pipeline that is used for Logstash Central Management.
+If the request succeeds, you receive an empty response with an appropriate status code.
+```ts
+client.logstash.deletePipeline({ id })
+```
+
+### Arguments [_arguments_logstash.delete_pipeline]
+
+#### Request (object) [_request_logstash.delete_pipeline]
+- **`id` (string)**: An identifier for the pipeline.
+
+## client.logstash.getPipeline [_logstash.get_pipeline]
+Get Logstash pipelines.
+Get pipelines that are used for Logstash Central Management.
+```ts
+client.logstash.getPipeline({ ... })
+```
+
+### Arguments [_arguments_logstash.get_pipeline]
+
+#### Request (object) [_request_logstash.get_pipeline]
+- **`id` (Optional, string \| string[])**: A list of pipeline identifiers.
+
+## client.logstash.putPipeline [_logstash.put_pipeline]
+Create or update a Logstash pipeline.
+
+Create a pipeline that is used for Logstash Central Management.
+If the specified pipeline exists, it is replaced.
+```ts
+client.logstash.putPipeline({ id })
+```
+
+### Arguments [_arguments_logstash.put_pipeline]
+
+#### Request (object) [_request_logstash.put_pipeline]
+- **`id` (string)**: An identifier for the pipeline.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**
+
+## client.migration.deprecations [_migration.deprecations]
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+```ts
+client.migration.deprecations({ ... })
+```
+
+### Arguments [_arguments_migration.deprecations]
+
+#### Request (object) [_request_migration.deprecations]
+- **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
+
+## client.migration.getFeatureUpgradeStatus [_migration.get_feature_upgrade_status]
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+```ts
+client.migration.getFeatureUpgradeStatus()
+```
+
+
+## client.migration.postFeatureUpgrade [_migration.post_feature_upgrade]
+Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+This API starts the automatic migration process.
+
+Some functionality might be temporarily unavailable during the migration process.
+
+TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
+```ts
+client.migration.postFeatureUpgrade()
+```
+
+
+## client.ml.clearTrainedModelDeploymentCache [_ml.clear_trained_model_deployment_cache]
+Clear trained model deployment cache.
+
+Cache will be cleared on all nodes where the trained model is assigned.
+A trained model deployment may have an inference cache enabled.
+As requests are handled by each allocated node, their responses may be cached on that individual node.
+Calling this API clears the caches without restarting the deployment.
+```ts
+client.ml.clearTrainedModelDeploymentCache({ model_id })
+```
+
+### Arguments [_arguments_ml.clear_trained_model_deployment_cache]
+
+#### Request (object) [_request_ml.clear_trained_model_deployment_cache]
+- **`model_id` (string)**: The unique identifier of the trained model.
+
+## client.ml.closeJob [_ml.close_job]
+Close anomaly detection jobs.
+
+A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data.
+If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request.
+When a datafeed that has a specified end date stops, it automatically closes its associated job.
+```ts
+client.ml.closeJob({ job_id })
+```
+
+### Arguments [_arguments_ml.close_job]
+
+#### Request (object) [_request_ml.close_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
+- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
+- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.
+
+## client.ml.deleteCalendar [_ml.delete_calendar]
+Delete a calendar.
+
+Remove all scheduled events from a calendar, then delete it.
+```ts
+client.ml.deleteCalendar({ calendar_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar]
+
+#### Request (object) [_request_ml.delete_calendar]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+
+## client.ml.deleteCalendarEvent [_ml.delete_calendar_event]
+Delete events from a calendar.
+```ts
+client.ml.deleteCalendarEvent({ calendar_id, event_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar_event]
+
+#### Request (object) [_request_ml.delete_calendar_event]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`event_id` (string)**: Identifier for the scheduled event.
+You can obtain this identifier by using the get calendar events API.
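+
+A short sketch of that lookup-then-delete flow (the calendar ID is a placeholder):
+
+```ts
+const { events } = await client.ml.getCalendarEvents({ calendar_id: 'planned-outages' })
+for (const event of events) {
+  if (event.event_id != null) {
+    await client.ml.deleteCalendarEvent({
+      calendar_id: 'planned-outages',
+      event_id: event.event_id
+    })
+  }
+}
+```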
+
+## client.ml.deleteCalendarJob [_ml.delete_calendar_job]
+Delete anomaly jobs from a calendar.
+```ts
+client.ml.deleteCalendarJob({ calendar_id, job_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar_job]
+
+#### Request (object) [_request_ml.delete_calendar_job]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a
+list of jobs or groups.
+
+## client.ml.deleteDataFrameAnalytics [_ml.delete_data_frame_analytics]
+Delete a data frame analytics job.
+```ts
+client.ml.deleteDataFrameAnalytics({ id })
+```
+
+### Arguments [_arguments_ml.delete_data_frame_analytics]
+
+#### Request (object) [_request_ml.delete_data_frame_analytics]
+- **`id` (string)**: Identifier for the data frame analytics job.
+- **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the job to be deleted.
+
+## client.ml.deleteDatafeed [_ml.delete_datafeed]
+Delete a datafeed.
+```ts
+client.ml.deleteDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.delete_datafeed]
+
+#### Request (object) [_request_ml.delete_datafeed]
+- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This
+identifier can contain lowercase alphanumeric characters (a-z and 0-9),
+hyphens, and underscores. It must start and end with alphanumeric
+characters.
+- **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than
+stopping and deleting the datafeed.
+
+## client.ml.deleteExpiredData [_ml.delete_expired_data]
+Delete expired ML data.
+
+Delete all job results, model snapshots and forecast data that have exceeded
+their retention days period. Machine learning state documents that are not
+associated with any job are also deleted.
+You can limit the request to a single or set of anomaly detection jobs by
+using a job identifier, a group name, a list of jobs, or a
+wildcard expression. You can delete expired data for all anomaly detection
+jobs by using `_all`, by specifying `*` as the job identifier, or by omitting
+the identifier.
+```ts
+client.ml.deleteExpiredData({ ... })
+```
+
+### Arguments [_arguments_ml.delete_expired_data]
+
+#### Request (object) [_request_ml.delete_expired_data]
+- **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a
+group name, or a wildcard expression.
+- **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default
+behavior is no throttling.
+- **`timeout` (Optional, string \| -1 \| 0)**: How long the underlying delete processes can run before they are canceled.
+
+## client.ml.deleteFilter [_ml.delete_filter]
+Delete a filter.
+
+If an anomaly detection job references the filter, you cannot delete the
+filter. You must update or delete the job before you can delete the filter.
+```ts
+client.ml.deleteFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.delete_filter]
+
+#### Request (object) [_request_ml.delete_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+
+## client.ml.deleteForecast [_ml.delete_forecast]
+Delete forecasts from a job.
+
+By default, forecasts are retained for 14 days.
You can specify a +different retention period with the `expires_in` parameter in the forecast +jobs API. The delete forecast API enables you to delete one or more +forecasts before they expire. +```ts +client.ml.deleteForecast({ job_id }) +``` + +### Arguments [_arguments_ml.delete_forecast] + +#### Request (object) [_request_ml.delete_forecast] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify +this optional parameter or if you specify `_all` or `*` the API deletes +all forecasts from the job. +- **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In +particular, if this parameter is set to `false` and there are no +forecasts associated with the job, attempts to delete all forecasts +return an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for the completion of the delete +operation. When this period of time elapses, the API fails and returns an +error. + +## client.ml.deleteJob [_ml.delete_job] +Delete an anomaly detection job. + +All job configuration, model state and results are deleted. +It is not currently possible to delete multiple jobs using wildcards or a +comma separated list. If you delete a job that has a datafeed, the request +first tries to delete the datafeed. This behavior is equivalent to calling +the delete datafeed API with the same timeout and force parameters as the +delete job request. +```ts +client.ml.deleteJob({ job_id }) +``` + +### Arguments [_arguments_ml.delete_job] + +#### Request (object) [_request_ml.delete_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than +closing and deleting the job. +- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. +- **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the +job deletion completes. + +## client.ml.deleteModelSnapshot [_ml.delete_model_snapshot] +Delete a model snapshot. + +You cannot delete the active model snapshot. To delete that snapshot, first +revert to a different one. To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. +```ts +client.ml.deleteModelSnapshot({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.delete_model_snapshot] + +#### Request (object) [_request_ml.delete_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. + +## client.ml.deleteTrainedModel [_ml.delete_trained_model] +Delete an unreferenced trained model. + +The request deletes a trained inference model that is not referenced by an ingest pipeline. +```ts +client.ml.deleteTrainedModel({ model_id }) +``` + +### Arguments [_arguments_ml.delete_trained_model] + +#### Request (object) [_request_ml.delete_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. + +## client.ml.deleteTrainedModelAlias [_ml.delete_trained_model_alias] +Delete a trained model alias. + +This API deletes an existing model alias that refers to a trained model. If +the model alias is missing or refers to a model other than the one identified +by the `model_id`, this API returns an error. +```ts +client.ml.deleteTrainedModelAlias({ model_alias, model_id }) +``` + +### Arguments [_arguments_ml.delete_trained_model_alias] + +#### Request (object) [_request_ml.delete_trained_model_alias] +- **`model_alias` (string)**: The model alias to delete. +- **`model_id` (string)**: The trained model ID to which the model alias refers. + +## client.ml.estimateModelMemory [_ml.estimate_model_memory] +Estimate job model memory usage. + +Make an estimation of the memory usage for an anomaly detection job model. +The estimate is based on analysis configuration details for the job and cardinality +estimates for the fields it references. +```ts +client.ml.estimateModelMemory({ ... }) +``` + +### Arguments [_arguments_ml.estimate_model_memory] + +#### Request (object) [_request_ml.estimate_model_memory] +- **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the +`analysis_config` component of the body of this API. +- **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed +for influencer fields over the time period that the job analyzes data. +To produce a good answer, values must be provided for all influencer +fields. Providing values for fields that are not listed as `influencers` +has no effect on the estimation. +- **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole +time period that the job analyzes data. To produce a good answer, values +must be provided for fields referenced in the `by_field_name`, +`over_field_name` and `partition_field_name` of any detectors. Providing +values for other fields has no effect on the estimation. It can be +omitted from the request if no detectors have a `by_field_name`, +`over_field_name` or `partition_field_name`. + +## client.ml.evaluateDataFrame [_ml.evaluate_data_frame] +Evaluate data frame analytics. + +The API packages together commonly used evaluation metrics for various types +of machine learning features. This has been designed for use on indexes +created by data frame analytics. Evaluation requires both a ground truth +field and an analytics result field to be present. +```ts +client.ml.evaluateDataFrame({ evaluation, index }) +``` + +### Arguments [_arguments_ml.evaluate_data_frame] + +#### Request (object) [_request_ml.evaluate_data_frame] +- **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform. +- **`index` (string)**: Defines the `index` in which the evaluation will be performed. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index. + +## client.ml.explainDataFrameAnalytics [_ml.explain_data_frame_analytics] +Explain data frame analytics config. + +This API provides explanations for a data frame analytics config that either +exists already or one that has not been created yet. The following +explanations are provided: +* which fields are included or not in the analysis and why, +* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. +If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. +```ts +client.ml.explainDataFrameAnalytics({ ... }) +``` + +### Arguments [_arguments_ml.explain_data_frame_analytics] + +#### Request (object) [_request_ml.explain_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an +index. Optionally, query and _source may be specified. +- **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally +results_field (ml by default). +- **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +- **`description` (Optional, string)**: A description of the job. +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be +included in the analysis. The patterns specified in excludes are applied +last, therefore excludes takes precedence. 
In other words, if the same
+field is specified in both includes and excludes, then the field will not
+be included in the analysis.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node.
+
+## client.ml.flushJob [_ml.flush_job]
+Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using
+the post data API. Depending on the content of the buffer, it might
+additionally calculate new results. Both flush and close operations are
+similar; however, the flush is more efficient if you are expecting to send
+more data for analysis. When flushing, the job remains open and is available
+to continue analyzing data. A close operation additionally prunes and
+persists the model state to disk and the job must be opened again before
+analyzing further data.
+```ts
+client.ml.flushJob({ job_id })
+```
+
+### Arguments [_arguments_ml.flush_job]
+
+#### Request (object) [_request_ml.flush_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`advance_time` (Optional, string \| Unit)**: Refer to the description for the `advance_time` query parameter.
+- **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`skip_time` (Optional, string \| Unit)**: Refer to the description for the `skip_time` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+
+## client.ml.forecast [_ml.forecast]
+Predict future behavior of a time series.
+
+Forecasts are not supported for jobs that perform population analysis; an
+error occurs if you try to create a forecast for a job that has an
+`over_field_name` in its configuration. Forecasts predict future behavior
+based on historical data.
+```ts
+client.ml.forecast({ job_id })
+```
+
+### Arguments [_arguments_ml.forecast]
+
+#### Request (object) [_request_ml.forecast]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you
+create a forecast; otherwise, an error occurs.
+- **`duration` (Optional, string \| -1 \| 0)**: Refer to the description for the `duration` query parameter.
+- **`expires_in` (Optional, string \| -1 \| 0)**: Refer to the description for the `expires_in` query parameter.
+- **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.
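+
+For example (the job ID is a placeholder), forecasting the next three days and keeping the result for a week:
+
+```ts
+await client.ml.forecast({
+  job_id: 'my-anomaly-job', // the job must be open
+  duration: '3d',
+  expires_in: '7d'
+})
+```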
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of buckets.
+- **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.
+
+## client.ml.getCalendarEvents [_ml.get_calendar_events]
+Get info about events in calendars.
+```ts
+client.ml.getCalendarEvents({ calendar_id })
+```
+
+### Arguments [_arguments_ml.get_calendar_events]
+
+#### Request (object) [_request_ml.get_calendar_events]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of IDs or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+- **`end` (Optional, string \| Unit)**: Specifies to get events with timestamps earlier than this time.
+- **`from` (Optional, number)**: Skips the specified number of events.
+- **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
+- **`size` (Optional, number)**: Specifies the maximum number of events to obtain.
+- **`start` (Optional, string \| Unit)**: Specifies to get events with timestamps after this time.
+
+## client.ml.getCalendars [_ml.get_calendars]
+Get calendar configuration info.
+```ts
+client.ml.getCalendars({ ... })
+```
+
+### Arguments [_arguments_ml.get_calendars]
+
+#### Request (object) [_request_ml.get_calendars]
+- **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of IDs or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+- **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier.
+- **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier.
+- **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier.
+
+## client.ml.getCategories [_ml.get_categories]
+Get anomaly detection job results for categories.
+```ts
+client.ml.getCategories({ job_id })
+```
+
+### Arguments [_arguments_ml.get_categories]
+
+#### Request (object) [_request_ml.get_categories]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify
+neither the category ID nor the partition_field_value, the API returns
+information about all categories. If you specify only the
+partition_field_value, it returns information about all categories for
+the specified partition.
+- **`page` (Optional, { from, size })**: Configures pagination.
+This parameter has the `from` and `size` properties.
+- **`from` (Optional, number)**: Skips the specified number of categories.
+- **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. +- **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. + +## client.ml.getDataFrameAnalytics [_ml.get_data_frame_analytics] +Get data frame analytics job configuration info. +You can get information for multiple data frame analytics jobs in a single +API request by using a list of data frame analytics jobs or a +wildcard expression. +```ts +client.ml.getDataFrameAnalytics({ ... }) +``` + +### Arguments [_arguments_ml.get_data_frame_analytics] + +#### Request (object) [_request_ml.get_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats] +Get data frame analytics job stats. +```ts +client.ml.getDataFrameAnalyticsStats({ ... }) +``` + +### Arguments [_arguments_ml.get_data_frame_analytics_stats] + +#### Request (object) [_request_ml.get_data_frame_analytics_stats] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. + +## client.ml.getDatafeedStats [_ml.get_datafeed_stats] +Get datafeed stats. +You can get statistics for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. 
You can
+get statistics for all datafeeds by using `_all`, by specifying `*` as the
+`<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the
+only information you receive is the `datafeed_id` and the `state`.
+This API returns a maximum of 10,000 datafeeds.
+```ts
+client.ml.getDatafeedStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_datafeed_stats]
+
+#### Request (object) [_request_ml.get_datafeed_stats]
+- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a
+wildcard expression. If you do not specify one of these options, the API
+returns information about all datafeeds.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no datafeeds that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value is `true`, which returns an empty `datafeeds` array
+when there are no matches and the subset of results when there are
+partial matches. If this parameter is `false`, the request returns a
+`404` status code when there are no matches or only partial matches.
+
+## client.ml.getDatafeeds [_ml.get_datafeeds]
+Get datafeeds configuration info.
+You can get information for multiple datafeeds in a single API request by
+using a list of datafeeds or a wildcard expression. You can
+get information for all datafeeds by using `_all`, by specifying `*` as the
+`<feed_id>`, or by omitting the `<feed_id>`.
+This API returns a maximum of 10,000 datafeeds.
+```ts
+client.ml.getDatafeeds({ ... })
+```
+
+### Arguments [_arguments_ml.get_datafeeds]
+
+#### Request (object) [_request_ml.get_datafeeds]
+- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a
+wildcard expression. If you do not specify one of these options, the API
+returns information about all datafeeds.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no datafeeds that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value is `true`, which returns an empty `datafeeds` array
+when there are no matches and the subset of results when there are
+partial matches. If this parameter is `false`, the request returns a
+`404` status code when there are no matches or only partial matches.
+- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on
+retrieval. This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+
+## client.ml.getFilters [_ml.get_filters]
+Get filters.
+You can get a single filter or all filters.
+```ts
+client.ml.getFilters({ ... })
+```
+
+### Arguments [_arguments_ml.get_filters]
+
+#### Request (object) [_request_ml.get_filters]
+- **`filter_id` (Optional, string \| string[])**: A string that uniquely identifies a filter.
+- **`from` (Optional, number)**: Skips the specified number of filters.
+- **`size` (Optional, number)**: Specifies the maximum number of filters to obtain.
+
+## client.ml.getInfluencers [_ml.get_influencers]
+Get anomaly detection job results for influencers.
+Influencers are the entities that have contributed to, or are to blame for,
+the anomalies.
Influencer results are available only if an
+`influencer_field_name` is specified in the job configuration.
+```ts
+client.ml.getInfluencers({ job_id })
+```
+
+### Arguments [_arguments_ml.get_influencers]
+
+#### Request (object) [_request_ml.get_influencers]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`page` (Optional, { from, size })**: Configures pagination.
+This parameter has the `from` and `size` properties.
+- **`desc` (Optional, boolean)**: If true, the results are sorted in descending order.
+- **`end` (Optional, string \| Unit)**: Returns influencers with timestamps earlier than this time.
+The default value means it is unset and results are not limited to
+specific timestamps.
+- **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results
+are included.
+- **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this
+value.
+- **`from` (Optional, number)**: Skips the specified number of influencers.
+- **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain.
+- **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the
+influencers are sorted by the `influencer_score` value.
+- **`start` (Optional, string \| Unit)**: Returns influencers with timestamps after this time. The default value
+means it is unset and results are not limited to specific timestamps.
+
+## client.ml.getJobStats [_ml.get_job_stats]
+Get anomaly detection job stats.
+```ts
+client.ml.getJobStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_job_stats]
+
+#### Request (object) [_request_ml.get_job_stats]
+- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a
+group name, a list of jobs, or a wildcard expression. If
+you do not specify one of these options, the API returns information for
+all anomaly detection jobs.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no jobs that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If `true`, the API returns an empty `jobs` array when
+there are no matches and the subset of results when there are partial
+matches. If `false`, the API returns a `404` status
+code when there are no matches or only partial matches.
+
+## client.ml.getJobs [_ml.get_jobs]
+Get anomaly detection jobs configuration info.
+You can get information for multiple anomaly detection jobs in a single API
+request by using a group name, a list of jobs, or a wildcard
+expression. You can get information for all anomaly detection jobs by using
+`_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+```ts
+client.ml.getJobs({ ... })
+```
+
+### Arguments [_arguments_ml.get_jobs]
+
+#### Request (object) [_request_ml.get_jobs]
+- **`job_id` (Optional, string \| string[])**: Identifier for the anomaly detection job. It can be a job identifier, a
+group name, or a wildcard expression. If you do not specify one of these
+options, the API returns information for all anomaly detection jobs.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no jobs that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3.
Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status +code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +## client.ml.getMemoryStats [_ml.get_memory_stats] +Get machine learning memory usage info. +Get information about how machine learning jobs and trained models are using memory, +on each node, both within the JVM heap, and natively, outside of the JVM. +```ts +client.ml.getMemoryStats({ ... }) +``` + +### Arguments [_arguments_ml.get_memory_stats] + +#### Request (object) [_request_ml.get_memory_stats] +- **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or +`ml:true` +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout +expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request +fails and returns an error. + +## client.ml.getModelSnapshotUpgradeStats [_ml.get_model_snapshot_upgrade_stats] +Get anomaly detection job model snapshot upgrade usage info. +```ts +client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.get_model_snapshot_upgrade_stats] + +#### Request (object) [_request_ml.get_model_snapshot_upgrade_stats] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + + - Contains wildcard expressions and there are no jobs that match. + - Contains the _all string or no identifiers and there are no matches. + - Contains wildcard expressions and there are only partial matches. + +The default value is true, which returns an empty jobs array when there are no matches and the subset of results +when there are partial matches. If this parameter is false, the request returns a 404 status code when there are +no matches or only partial matches. + +## client.ml.getModelSnapshots [_ml.get_model_snapshots] +Get model snapshots info. +```ts +client.ml.getModelSnapshots({ job_id }) +``` + +### Arguments [_arguments_ml.get_model_snapshots] + +#### Request (object) [_request_ml.get_model_snapshots] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. 
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of snapshots.
+- **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain.
+
+## client.ml.getOverallBuckets [_ml.get_overall_buckets]
+Get overall bucket results.
+
+Retrieves overall bucket results that summarize the bucket results of
+multiple anomaly detection jobs.
+
+The `overall_score` is calculated by combining the scores of all the
+buckets within the overall bucket span. First, the maximum
+`anomaly_score` per anomaly detection job in the overall bucket is
+calculated. Then the `top_n` of those scores are averaged to result in
+the `overall_score`. This means that you can fine-tune the
+`overall_score` so that it is more or less sensitive to the number of
+jobs that detect an anomaly at the same time. For example, if you set
+`top_n` to `1`, the `overall_score` is the maximum bucket score in the
+overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+the `overall_score` is high only when all jobs detect anomalies in that
+overall bucket. If you set the `bucket_span` parameter (to a value
+greater than its default), the `overall_score` is the maximum
+`overall_score` of the overall buckets that have a span equal to the
+jobs' largest bucket span.
+```ts
+client.ml.getOverallBuckets({ job_id })
+```
+
+### Arguments [_arguments_ml.get_overall_buckets]
+
+#### Request (object) [_request_ml.get_overall_buckets]
+- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a
+group name, a list of jobs or groups, or a wildcard
+expression.
+
+You can summarize the bucket results for all anomaly detection jobs by
+using `_all` or by specifying `*` as the `<job_id>`.
+- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
+- **`bucket_span` (Optional, string \| -1 \| 0)**: Refer to the description for the `bucket_span` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`overall_score` (Optional, number \| string)**: Refer to the description for the `overall_score` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter.
+
+## client.ml.getRecords [_ml.get_records]
+Get anomaly records for an anomaly detection job.
+Records contain the detailed analytical results. They describe the anomalous
+activity that has been identified in the input data based on the detector
+configuration.
+There can be many anomaly records depending on the characteristics and size
+of the input data. In practice, there are often too many to be able to
+manually process them. The machine learning features therefore perform a
+sophisticated aggregation of the anomaly records into buckets.
+The number of record results depends on the number of anomalies found in each
+bucket, which relates to the number of time series being modeled and the
+number of detectors.
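+
+As an illustration of the filtering parameters described below, here is a minimal sketch of fetching only high-scoring records; the job ID, score threshold, and page size are hypothetical values, not defaults:
+
+```ts
+// Fetch up to 50 anomaly records with a record_score of 75 or higher,
+// sorted by score in descending order. All values here are illustrative.
+const response = await client.ml.getRecords({
+  job_id: 'my-anomaly-job',
+  record_score: 75,
+  sort: 'record_score',
+  desc: true,
+  size: 50
+})
+console.log(response.records)
+```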
+```ts
+client.ml.getRecords({ job_id })
+```
+
+### Arguments [_arguments_ml.get_records]
+
+#### Request (object) [_request_ml.get_records]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`page` (Optional, { from, size })**
+- **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter.
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of records.
+- **`size` (Optional, number)**: Specifies the maximum number of records to obtain.
+
+## client.ml.getTrainedModels [_ml.get_trained_models]
+Get trained model configuration info.
+```ts
+client.ml.getTrainedModels({ ... })
+```
+
+### Arguments [_arguments_ml.get_trained_models]
+
+#### Request (object) [_request_ml.get_trained_models]
+- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias.
+
+You can get information for multiple trained models in a single API
+request by using a list of model IDs or a wildcard
+expression.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the
+subset of results when there are partial matches.
+- **`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a
+JSON map (true) or in a custom compressed format (false).
+- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on
+retrieval. This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+- **`from` (Optional, number)**: Skips the specified number of models.
+- **`include` (Optional, Enum("definition" \| "feature_importance_baseline" \| "hyperparameters" \| "total_feature_importance" \| "definition_status"))**: A comma delimited string of optional fields to include in the response
+body.
+- **`include_model_definition` (Optional, boolean)**: This parameter is deprecated. Use `include=definition` instead.
+- **`size` (Optional, number)**: Specifies the maximum number of models to obtain.
+- **`tags` (Optional, string \| string[])**: A comma delimited string of tags. A trained model can have many tags, or
+none. When supplied, only trained models that contain all the supplied
+tags are returned.
+
+## client.ml.getTrainedModelsStats [_ml.get_trained_models_stats]
+Get trained models usage info.
+You can get usage information for multiple trained
+models in a single API request by using a list of model IDs or a wildcard expression.
+```ts
+client.ml.getTrainedModelsStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_trained_models_stats]
+
+#### Request (object) [_request_ml.get_trained_models_stats]
+- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias. It can be a
+list or a wildcard expression.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the
+subset of results when there are partial matches.
+- **`from` (Optional, number)**: Skips the specified number of models.
+- **`size` (Optional, number)**: Specifies the maximum number of models to obtain.
+
+## client.ml.inferTrainedModel [_ml.infer_trained_model]
+Evaluate a trained model.
+```ts
+client.ml.inferTrainedModel({ model_id, docs })
+```
+
+### Arguments [_arguments_ml.infer_trained_model]
+
+#### Request (object) [_request_ml.infer_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain fields matching your
+configured trained model input. Typically, for NLP models, the field name is `text_field`.
+Currently, for NLP models, only a single value is allowed.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait for inference results.
+
+## client.ml.info [_ml.info]
+Get machine learning information.
+Get defaults and limits used by machine learning.
+This endpoint is designed to be used by a user interface that needs to fully
+understand machine learning configurations where some options are not
+specified, meaning that the defaults should be used. This endpoint may be
+used to find out what those defaults are. It also provides information about
+the maximum size of machine learning jobs that could run in the current
+cluster configuration.
+```ts
+client.ml.info()
+```
+
+
+## client.ml.openJob [_ml.open_job]
+Open anomaly detection jobs.
+
+An anomaly detection job must be opened to be ready to receive and analyze
+data. It can be opened and closed multiple times throughout its lifecycle.
+When you open a new job, it starts with an empty model.
+When you open an existing job, the most recent model state is automatically
+loaded. The job is ready to resume its analysis from where it left off, once
+new data is received.
+```ts
+client.ml.openJob({ job_id })
+```
+
+### Arguments [_arguments_ml.open_job]
+
+#### Request (object) [_request_ml.open_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.
+
+## client.ml.postCalendarEvents [_ml.post_calendar_events]
+Add scheduled events to the calendar.
+```ts
+client.ml.postCalendarEvents({ calendar_id, events })
+```
+
+### Arguments [_arguments_ml.post_calendar_events]
+
+#### Request (object) [_request_ml.post_calendar_events]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`events` ({ calendar_id, event_id, description, end_time, start_time }[])**: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
+
+## client.ml.postData [_ml.post_data]
+Send data to an anomaly detection job for analysis.
+
+IMPORTANT: For each job, data can be accepted from only a single connection at a time.
+It is not currently possible to post data to multiple jobs using wildcards or a list.
+```ts
+client.ml.postData({ job_id })
+```
+
+### Arguments [_arguments_ml.post_data]
+
+#### Request (object) [_request_ml.post_data]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
+- **`data` (Optional, TData[])**
+- **`reset_end` (Optional, string \| Unit)**: Specifies the end of the bucket resetting range.
+- **`reset_start` (Optional, string \| Unit)**: Specifies the start of the bucket resetting range.
+
+## client.ml.previewDataFrameAnalytics [_ml.preview_data_frame_analytics]
+Preview features used by data frame analytics.
+Preview the extracted features used by a data frame analytics config.
+```ts
+client.ml.previewDataFrameAnalytics({ ... })
+```
+
+### Arguments [_arguments_ml.preview_data_frame_analytics]
+
+#### Request (object) [_request_ml.preview_data_frame_analytics]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job.
+- **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics
+jobs. Note that `id` and `dest` don’t need to be provided in the context of
+this API.
+
+## client.ml.previewDatafeed [_ml.preview_datafeed]
+Preview a datafeed.
+This API returns the first "page" of search results from a datafeed.
+You can preview an existing datafeed or provide configuration details for a datafeed
+and anomaly detection job in the API. The preview shows the structure of the data
+that will be passed to the anomaly detection engine.
+IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that
+called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the
+datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.
+You can also use secondary authorization headers to supply the credentials.
+```ts
+client.ml.previewDatafeed({ ... })
+```
+
+### Arguments [_arguments_ml.preview_datafeed]
+
+#### Request (object) [_request_ml.preview_datafeed]
+- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase
+alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric
+characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job
+configuration details in the request body.
+- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview.
+- **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the
+`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must
+supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is
+used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.
+- **`start` (Optional, string \| Unit)**: The start time from which the datafeed preview should begin.
+- **`end` (Optional, string \| Unit)**: The end time at which the datafeed preview should stop.
+
+## client.ml.putCalendar [_ml.put_calendar]
+Create a calendar.
+```ts
+client.ml.putCalendar({ calendar_id })
+```
+
+### Arguments [_arguments_ml.put_calendar]
+
+#### Request (object) [_request_ml.put_calendar]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers.
+- **`description` (Optional, string)**: A description of the calendar.
+
+## client.ml.putCalendarJob [_ml.put_calendar_job]
+Add anomaly detection job to calendar.
+```ts
+client.ml.putCalendarJob({ calendar_id, job_id })
+```
+
+### Arguments [_arguments_ml.put_calendar_job]
+
+#### Request (object) [_request_ml.put_calendar_job]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
+
+## client.ml.putDataFrameAnalytics [_ml.put_data_frame_analytics]
+Create a data frame analytics job.
+This API creates a data frame analytics job that performs an analysis on the
+source indices and stores the outcome in a destination index.
+By default, the query used in the source configuration is `{"match_all": {}}`.
+
+If the destination index does not exist, it is created automatically when you start the job.
+
+If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.
+```ts
+client.ml.putDataFrameAnalytics({ id, analysis, dest, source })
+```
+
+### Arguments [_arguments_ml.put_data_frame_analytics]
+
+#### Request (object) [_request_ml.put_data_frame_analytics]
+- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain
+lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+underscores. It must start and end with alphanumeric characters.
+- **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to
+perform one of the following types of analysis: classification, outlier
+detection, or regression.
+- **`dest` ({ index, results_field })**: The destination configuration.
+- **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node. If
+set to `false` and a machine learning node with capacity to run the job
+cannot be immediately found, the API returns an error. If set to `true`,
+the API does not return an error; the job waits in the `starting` state
+until sufficient machine learning node capacity is available. This
+behavior is also affected by the cluster-wide
+`xpack.ml.max_lazy_ml_nodes` setting.
+- **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields
+will be included in the analysis. The patterns specified in `excludes`
+are applied last; therefore, `excludes` takes precedence. In other words,
+if the same field is specified in both `includes` and `excludes`, then
+the field will not be included in the analysis. If `analyzed_fields` is
+not set, only the relevant fields will be included. For example, all the
+numeric fields for outlier detection.
+The supported fields vary for each type of analysis. Outlier detection
+requires numeric or `boolean` data to analyze. The algorithms don’t
+support missing values; therefore, fields that have data types other than
+numeric or boolean are ignored. Documents where included fields contain
+missing values, null values, or an array are also ignored. Therefore the
+`dest` index may contain documents that don’t have an outlier score.
+Regression supports fields that are numeric, `boolean`, `text`,
+`keyword`, and `ip` data types. It is also tolerant of missing values.
+Fields that are supported are included in the analysis; other fields are
+ignored. Documents where included fields contain an array with two or
+more values are also ignored. Documents in the `dest` index that don’t
+contain a results field are not included in the regression analysis.
+Classification supports fields that are numeric, `boolean`, `text`,
+`keyword`, and `ip` data types. It is also tolerant of missing values.
+Fields that are supported are included in the analysis; other fields are
+ignored. Documents where included fields contain an array with two or
+more values are also ignored. Documents in the `dest` index that don’t
+contain a results field are not included in the classification analysis.
+Classification analysis can be improved by mapping ordinal variable
+values to a single number. For example, in case of age ranges, you can
+model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.
+- **`description` (Optional, string)**: A description of the job.
+- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more
+threads may decrease the time necessary to complete the analysis at the
+cost of using more CPU. Note that the process may use additional threads
+for operational functionality other than the analysis itself.
+- **`_meta` (Optional, Record)**
+- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for
+analytical processing. If your `elasticsearch.yml` file contains an
+`xpack.ml.max_model_memory_limit` setting, an error occurs when you try
+to create data frame analytics jobs that have `model_memory_limit` values
+greater than that setting.
+- **`headers` (Optional, Record)**
+- **`version` (Optional, string)**
+
+## client.ml.putDatafeed [_ml.put_datafeed]
+Create a datafeed.
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
+You can associate only one datafeed with each anomaly detection job.
+The datafeed contains a query that runs at a defined interval (`frequency`).
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.
+
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had
+at the time of creation and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.
+You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed
+directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
+```ts
+client.ml.putDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.put_datafeed]
+
+#### Request (object) [_request_ml.put_datafeed]
+- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed.
+This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+It must start and end with alphanumeric characters.
+- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches.
+Support for aggregations is limited and should be used only with low cardinality data.
+- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years.
+This search is split into time chunks in order to ensure the load on Elasticsearch is managed.
+Chunking configuration controls how the size of these time chunks is calculated;
+it is an advanced configuration option.
+- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window.
+The datafeed can optionally search over indices that have already been read in an effort to determine whether
+any data has subsequently been added to the index. If missing data is found, it is a good indication that the
+`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time.
+This check runs only on real-time datafeeds.
+- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time.
+The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible
+fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last
+(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses
+aggregations, this value must be divisible by the interval of the date histogram aggregation.
+- **`indices` (Optional, string \| string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master
+nodes and the machine learning nodes must have the `remote_cluster_client` role.
+- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search.
+- **`job_id` (Optional, string)**: Identifier for the anomaly detection job.
+- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically
+stops and closes the associated job after this many real-time searches return no documents. In other words,
+it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no
+end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
+object is passed verbatim to Elasticsearch.
+- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
+not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
+value is randomly selected between `60s` and `120s`. This randomness improves the query performance
+when there are multiple jobs running on the same node.
+- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
+- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed.
+The detector configuration objects in a job can contain functions that use these script fields.
+- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
+The maximum value is the value of `index.max_result_window`, which is 10,000 by default.
+- **`headers` (Optional, Record)**
+- **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all`
+string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored.
+
+## client.ml.putFilter [_ml.put_filter]
+Create a filter.
+A filter contains a list of strings. It can be used by one or more anomaly detection jobs.
+Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
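+
+For instance, a minimal sketch of creating a filter that detector custom rules can then reference; the filter ID and items below are hypothetical:
+
+```ts
+// Create a filter of known-safe domains; a detector's custom_rules can
+// reference it to suppress anomalies for these entities.
+// The filter ID and items are illustrative only.
+await client.ml.putFilter({
+  filter_id: 'safe_domains',
+  description: 'Domains that should not trigger anomalies',
+  items: ['www.example.com', '*.internal-example.com']
+})
+```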
+```ts
+client.ml.putFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.put_filter]
+
+#### Request (object) [_request_ml.put_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+- **`description` (Optional, string)**: A description of the filter.
+- **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item.
+Up to 10,000 items are allowed in each filter.
+
+## client.ml.putJob [_ml.put_job]
+Create an anomaly detection job.
+
+If you include a `datafeed_config`, you must have read index privileges on the source index.
+If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
+```ts
+client.ml.putJob({ job_id, analysis_config, data_description })
+```
+
+### Arguments [_arguments_ml.put_job]
+
+#### Request (object) [_request_ml.put_job]
+- **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+- **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.
+- **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
+- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
+- **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
+- **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom metadata about the job.
+- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`.
+- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
+- **`description` (Optional, string)**: A description of the job.
+- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many.
+- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot, it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced.
+- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted.
+- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans.
+- **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`.
+- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever.
+- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored.
This includes the
+`_all` string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
+
+## client.ml.putTrainedModel [_ml.put_trained_model]
+Create a trained model.
+Enables you to supply a trained model that is not created by data frame analytics.
+```ts
+client.ml.putTrainedModel({ model_id })
+```
+
+### Arguments [_arguments_ml.put_trained_model]
+
+#### Request (object) [_request_ml.put_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the
+model. If compressed_definition is specified, then definition cannot be
+specified.
+- **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then
+compressed_definition cannot be specified.
+- **`description` (Optional, string)**: A human-readable description of the inference trained model.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, learning_to_rank, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression
+or classification configuration. It must match the underlying
+definition.trained_model's target_type. For pre-packaged models such as
+ELSER the config is not required.
+- **`input` (Optional, { field_names })**: The input field names for the model definition.
+- **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model.
+- **`model_type` (Optional, Enum("tree_ensemble" \| "lang_ident" \| "pytorch"))**: The model type.
+- **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory.
+This property is supported only if defer_definition_decompression is true
+or the model definition is not supplied.
+- **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained model. If the model
+only works on one platform, because it is heavily optimized for a particular
+processor architecture and OS combination, then this field specifies which.
+The format of the string must match the platform identifiers used by Elasticsearch,
+so it must be one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`,
+or `windows-x86_64`. For portable models (those that work independent of processor
+architecture or OS features), leave this field unset.
+- **`tags` (Optional, string[])**: An array of tags to organize the model.
+- **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference.
+- **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided,
+the request defers definition decompression and skips relevant
+validations.
+- **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download)
+to complete.
+
+## client.ml.putTrainedModelAlias [_ml.put_trained_model_alias]
+Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained
+model.
+You can use aliases instead of trained model identifiers to make it easier to
+reference your models. For example, you can use aliases in inference
+aggregations and processors.
+An alias must be unique and refer to only a single trained model. However,
+you can have multiple aliases for each trained model.
+If you use this API to update an alias such that it references a different
+trained model ID and the model uses a different type of data frame analytics,
+an error occurs. For example, this situation occurs if you have a trained
+model for regression analysis and a trained model for classification
+analysis; you cannot reassign an alias from one type of trained model to
+another.
+If you use this API to update an alias and there are very few input fields in
+common between the old and new trained models for the model alias, the API
+returns a warning.
+```ts
+client.ml.putTrainedModelAlias({ model_alias, model_id })
+```
+
+### Arguments [_arguments_ml.put_trained_model_alias]
+
+#### Request (object) [_request_ml.put_trained_model_alias]
+- **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers.
+- **`model_id` (string)**: The identifier for the trained model that the alias refers to.
+- **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained
+model if it is already assigned to a different model. If the alias is
+already assigned and this parameter is false, the API returns an error.
+
+## client.ml.putTrainedModelDefinitionPart [_ml.put_trained_model_definition_part]
+Create part of a trained model definition.
+```ts
+client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts })
+```
+
+### Arguments [_arguments_ml.put_trained_model_definition_part]
+
+#### Request (object) [_request_ml.put_trained_model_definition_part]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`part` (number)**: The definition part number. When the definition is loaded for inference, the definition parts are streamed in the
+order of their part number. The first part must be `0` and the final part must be `total_parts - 1`.
+- **`definition` (string)**: The definition part for the model. Must be a base64 encoded string.
+- **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded.
+- **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0.
+
+## client.ml.putTrainedModelVocabulary [_ml.put_trained_model_vocabulary]
+Create a trained model vocabulary.
+This API is supported only for natural language processing (NLP) models.
+The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.
+```ts
+client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
+```
+
+### Arguments [_arguments_ml.put_trained_model_vocabulary]
+
+#### Request (object) [_request_ml.put_trained_model_vocabulary]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`vocabulary` (string[])**: The model vocabulary, which must not be empty.
+
+## client.ml.resetJob [_ml.reset_job]
+Reset an anomaly detection job.
+All model state and results are deleted. The job is ready to start over as if
+it had just been created.
+It is not currently possible to reset multiple jobs using wildcards or a
+comma-separated list.
+```ts
+client.ml.resetJob({ job_id })
+```
+
+### Arguments [_arguments_ml.reset_job]
+
+#### Request (object) [_request_ml.reset_job]
+- **`job_id` (string)**: The ID of the job to reset.
+- **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before
+returning.
+- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the
+user should be deleted along with any auto-generated annotations when the job is
+reset.
+
+## client.ml.revertModelSnapshot [_ml.revert_model_snapshot]
+Revert to a snapshot.
+The machine learning features react quickly to anomalous input, learning new
+behaviors in data. Highly anomalous input increases the variance in the
+models whilst the system learns whether this is a new step-change in behavior
+or a one-off event. In the case where this anomalous input is known to be a
+one-off, then it might be appropriate to reset the model state to a time
+before this event. For example, you might consider reverting to a saved
+snapshot after Black Friday or a critical system failure.
+```ts
+client.ml.revertModelSnapshot({ job_id, snapshot_id })
+```
+
+### Arguments [_arguments_ml.revert_model_snapshot]
+
+#### Request (object) [_request_ml.revert_model_snapshot]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: Identifier for the model snapshot. You can specify `empty` as the snapshot ID. Reverting to the empty
+snapshot means the anomaly detection job starts learning a new model from
+scratch when it is started.
+- **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.
+
+## client.ml.setUpgradeMode [_ml.set_upgrade_mode]
+Set upgrade_mode for ML indices.
+Sets a cluster-wide upgrade_mode setting that prepares machine learning
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your
+nodes and reindex your machine learning indices. In those circumstances,
+there must be no machine learning jobs running. You can close the machine
+learning jobs, do the upgrade, then open all the jobs again. Alternatively,
+you can use this API to temporarily halt tasks associated with the jobs and
+datafeeds and prevent new jobs from opening. You can also use this API
+during upgrades that do not require you to reindex your machine learning
+indices, though stopping jobs is not a requirement in that case.
+You can see the current value for the upgrade_mode setting by using the get
+machine learning info API.
+```ts
+client.ml.setUpgradeMode({ ... })
+```
+
+### Arguments [_arguments_ml.set_upgrade_mode]
+
+#### Request (object) [_request_ml.set_upgrade_mode]
+- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job
+and datafeed tasks and prohibits new job and datafeed tasks from
+starting.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed.
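+
+For example, a sketch of toggling upgrade mode around a cluster upgrade:
+
+```ts
+// Halt ML jobs and datafeeds before the upgrade...
+await client.ml.setUpgradeMode({ enabled: true, timeout: '10m' })
+// ...then, once the upgrade is done, resume normal operation.
+await client.ml.setUpgradeMode({ enabled: false, timeout: '10m' })
+```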
+ +## client.ml.startDataFrameAnalytics [_ml.start_data_frame_analytics] +Start a data frame analytics job. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +If the destination index does not exist, it is created automatically the +first time you start the data frame analytics job. The +`index.number_of_shards` and `index.number_of_replicas` settings for the +destination index are copied from the source index. If there are multiple +source indices, the destination index copies the highest setting values. The +mappings for the destination index are also copied from the source indices. +If there are any mapping conflicts, the job fails to start. +If the destination index exists, it is used as is. You can therefore set up +the destination index in advance with custom settings and mappings. +```ts +client.ml.startDataFrameAnalytics({ id }) +``` + +### Arguments [_arguments_ml.start_data_frame_analytics] + +#### Request (object) [_request_ml.start_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job +starts. + +## client.ml.startDatafeed [_ml.start_datafeed] +Start datafeeds. + +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. + +Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. + +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. +```ts +client.ml.startDatafeed({ datafeed_id }) +``` + +### Arguments [_arguments_ml.start_datafeed] + +#### Request (object) [_request_ml.start_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. +- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter. + +## client.ml.startTrainedModelDeployment [_ml.start_trained_model_deployment] +Start a trained model deployment. +It allocates the model to every machine learning node. +```ts +client.ml.startTrainedModelDeployment({ model_id }) +``` + +### Arguments [_arguments_ml.start_trained_model_deployment] + +#### Request (object) [_request_ml.start_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. 
+- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. +- **`cache_size` (Optional, number \| string)**: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +- **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`priority` (Optional, Enum("normal" \| "low"))**: The deployment priority. +- **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds +this value, new requests are rejected with a 429 error. +- **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases +the inference speed. The inference process is a compute-bound process; any number +greater than the number of available hardware threads on the machine does not increase the +inference speed. If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the model to deploy. +- **`wait_for` (Optional, Enum("started" \| "starting" \| "fully_allocated"))**: Specifies the allocation status to wait for before returning. + +## client.ml.stopDataFrameAnalytics [_ml.stop_data_frame_analytics] +Stop data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +```ts +client.ml.stopDataFrameAnalytics({ id }) +``` + +### Arguments [_arguments_ml.stop_data_frame_analytics] + +#### Request (object) [_request_ml.stop_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is true, which returns an empty data_frame_analytics +array when there are no matches and the subset of results when there are +partial matches. If this parameter is false, the request returns a 404 +status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. 
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job
+stops. Defaults to 20 seconds.
+
+## client.ml.stopDatafeed [_ml.stop_datafeed]
+Stop datafeeds.
+A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped
+multiple times throughout its lifecycle.
+```ts
+client.ml.stopDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.stop_datafeed]
+
+#### Request (object) [_request_ml.stop_datafeed]
+- **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated
+list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as
+the identifier.
+- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
+- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.
+
+## client.ml.stopTrainedModelDeployment [_ml.stop_trained_model_deployment]
+Stop a trained model deployment.
+```ts
+client.ml.stopTrainedModelDeployment({ model_id })
+```
+
+### Arguments [_arguments_ml.stop_trained_model_deployment]
+
+#### Request (object) [_request_ml.stop_trained_model_deployment]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match;
+contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and
+there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches.
+If `false`, the request returns a 404 status code when there are no matches or only partial matches.
+- **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you
+restart the model deployment.
+
+## client.ml.updateDataFrameAnalytics [_ml.update_data_frame_analytics]
+Update a data frame analytics job.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-data-frame-analytics)
+
+```ts
+client.ml.updateDataFrameAnalytics({ id })
+```
+
+### Arguments [_arguments_ml.update_data_frame_analytics]
+
+#### Request (object) [_request_ml.update_data_frame_analytics]
+- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain
+lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+underscores. It must start and end with alphanumeric characters.
+- **`description` (Optional, string)**: A description of the job.
+- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for
+analytical processing. If your `elasticsearch.yml` file contains an
+`xpack.ml.max_model_memory_limit` setting, an error occurs when you try
+to create data frame analytics jobs that have `model_memory_limit` values
+greater than that setting.
+- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more
+threads may decrease the time necessary to complete the analysis at the
+cost of using more CPU. Note that the process may use additional threads
+for operational functionality other than the analysis itself.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node.
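+
+A sketch of raising the memory limit on an existing job (the job ID and values are illustrative):
+
+```ts
+await client.ml.updateDataFrameAnalytics({
+  id: 'my-analytics-job', // placeholder ID
+  model_memory_limit: '2gb',
+  max_num_threads: 2
+})
+```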
+
+## client.ml.updateDatafeed [_ml.update_datafeed]
+Update a datafeed.
+You must stop and start the datafeed for the changes to be applied.
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at
+the time of the update and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.
+```ts
+client.ml.updateDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.update_datafeed]
+
+#### Request (object) [_request_ml.update_datafeed]
+- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed.
+This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+It must start and end with alphanumeric characters.
+- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only
+with low cardinality data.
+- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time
+chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of
+these time chunks is calculated; it is an advanced configuration option.
+- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally
+search over indices that have already been read in an effort to determine whether any data has subsequently been
+added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and
+the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time
+datafeeds.
+- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is
+either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket
+span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are
+written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value
+must be divisible by the interval of the date histogram aggregation.
+- **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine
+learning nodes must have the `remote_cluster_client` role.
+- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search.
+- **`job_id` (Optional, string)**
+- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically
+stops and closes the associated job after this many real-time searches return no documents. In other words,
+it stops after `frequency` times `max_empty_searches` of real-time operation.
If not set, a datafeed with no
+end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
+object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also
+changed. Therefore, the time required to learn might be long and the understandability of the results is
+unpredictable. If you want to make significant changes to the source data, it is recommended that you
+clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one
+when you are satisfied with the results of the job.
+- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
+not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
+value is randomly selected between `60s` and `120s`. This randomness improves the query performance
+when there are multiple jobs running on the same node.
+- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
+- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed.
+The detector configuration objects in a job can contain functions that use these script fields.
+- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
+The maximum value is the value of `index.max_result_window`.
+- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
+`_all` string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
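+
+A sketch of a typical update, assuming an existing datafeed (the ID and values are illustrative); remember that the datafeed must be stopped and started again for the changes to be applied:
+
+```ts
+await client.ml.updateDatafeed({
+  datafeed_id: 'datafeed-my-job', // placeholder ID
+  query: { bool: { must: [{ match_all: {} }] } },
+  query_delay: '90s',
+  scroll_size: 1000
+})
+```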
+
+## client.ml.updateFilter [_ml.update_filter]
+Update a filter.
+Updates the description of a filter, adds items, or removes items from the list.
+```ts
+client.ml.updateFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.update_filter]
+
+#### Request (object) [_request_ml.update_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+- **`add_items` (Optional, string[])**: The items to add to the filter.
+- **`description` (Optional, string)**: A description for the filter.
+- **`remove_items` (Optional, string[])**: The items to remove from the filter.
+
+## client.ml.updateJob [_ml.update_job]
+Update an anomaly detection job.
+Updates certain properties of an anomaly detection job.
+```ts
+client.ml.updateJob({ job_id })
+```
+
+### Arguments [_arguments_ml.update_job]
+
+#### Request (object) [_request_ml.update_job]
+- **`job_id` (string)**: Identifier for the job.
+- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when
+there is insufficient machine learning node capacity for it to be
+immediately assigned to a node. If `false` and a machine learning node
+with capacity to run the job cannot immediately be found, the open
+anomaly detection jobs API returns an error. However, this is also
+subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this
+option is set to `true`, the open anomaly detection jobs API does not
+return an error and the job waits in the opening state until sufficient
+machine learning node capacity is available.
+- **`analysis_limits` (Optional, { model_memory_limit })**
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence
+of the model.
+The default value is a randomized value between 3 and 4 hours, which
+avoids all jobs persisting at exactly the same time. The smallest allowed
+value is 1 hour.
+For very large models (several GB), persistence could take 10-20 minutes,
+so do not set the value too low.
+If the job is open when you make the update, you must stop the datafeed,
+close the job, then reopen the job and restart the datafeed for the
+changes to take effect.
+- **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom metadata about the job.
+For example, it can contain custom URL information as shown in Adding
+custom URLs to machine learning results.
+- **`categorization_filters` (Optional, string[])**
+- **`description` (Optional, string)**: A description of the job.
+- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**
+- **`model_prune_window` (Optional, string \| -1 \| 0)**
+- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old
+model snapshots for this job. It specifies a period of time (in days)
+after which only the first snapshot per day is retained. This period is
+relative to the timestamp of the most recent snapshot for this job. Valid
+values range from 0 to `model_snapshot_retention_days`. For jobs created
+before version 7.8.0, the default value matches
+`model_snapshot_retention_days`.
+- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old
+model snapshots for this job. It specifies the maximum period of time (in
+days) that snapshots are retained. This period is relative to the
+timestamp of the most recent snapshot for this job.
+- **`renormalization_window_days` (Optional, number)**: Advanced configuration option.
The period over which adjustments to the +score are applied, as new data is seen. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. +- **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. + +## client.ml.updateModelSnapshot [_ml.update_model_snapshot] +Update a snapshot. +Updates certain properties of a snapshot. +```ts +client.ml.updateModelSnapshot({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.update_model_snapshot] + +#### Request (object) [_request_ml.update_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. +- **`description` (Optional, string)**: A description of the model snapshot. +- **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. + +## client.ml.updateTrainedModelDeployment [_ml.update_trained_model_deployment] +Update a trained model deployment. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ml-update-trained-model-deployment) + +```ts +client.ml.updateTrainedModelDeployment({ model_id }) +``` + +### Arguments [_arguments_ml.update_trained_model_deployment] + +#### Request (object) [_request_ml.update_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. + +## client.ml.upgradeJobSnapshot [_ml.upgrade_job_snapshot] +Upgrade a snapshot. +Upgrade an anomaly detection model snapshot to the latest major version. +Over time, older snapshot formats are deprecated and removed. Anomaly +detection jobs support only snapshots that are from the current or previous +major version. +This API provides a means to upgrade a snapshot to the current major version. 
+This aids in preparing the cluster for an upgrade to the next major version. +Only one snapshot per anomaly detection job can be upgraded at a time and the +upgraded snapshot cannot be the current snapshot of the anomaly detection +job. +```ts +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.upgrade_job_snapshot] + +#### Request (object) [_request_ml.upgrade_job_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. +- **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the request to complete. + +## client.nodes.clearRepositoriesMeteringArchive [_nodes.clear_repositories_metering_archive] +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. +```ts +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) +``` + +### Arguments [_arguments_nodes.clear_repositories_metering_archive] + +#### Request (object) [_request_nodes.clear_repositories_metering_archive] +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. +- **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. + +## client.nodes.getRepositoriesMeteringInfo [_nodes.get_repositories_metering_info] +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. +```ts +client.nodes.getRepositoriesMeteringInfo({ node_id }) +``` + +### Arguments [_arguments_nodes.get_repositories_metering_info] + +#### Request (object) [_request_nodes.get_repositories_metering_info] +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. +For more information about the nodes selective options, refer to the node specification documentation. + +## client.nodes.hotThreads [_nodes.hot_threads] +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. +```ts +client.nodes.hotThreads({ ... }) +``` + +### Arguments [_arguments_nodes.hot_threads] + +#### Request (object) [_request_nodes.hot_threads] +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. +- **`interval` (Optional, string \| -1 \| 0)**: The interval to do the second sampling of threads. +- **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. +- **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. 
If no response is received
+before the timeout expires, the request fails and returns an error.
+- **`type` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The type to sample.
+- **`sort` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The sort order for 'cpu' type (default: total).
+
+## client.nodes.info [_nodes.info]
+Get node information.
+
+By default, the API returns all attributes and core settings for cluster nodes.
+```ts
+client.nodes.info({ ... })
+```
+
+### Arguments [_arguments_nodes.info]
+
+#### Request (object) [_request_nodes.info]
+- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information.
+- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest.
+- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.nodes.reloadSecureSettings [_nodes.reload_secure_settings]
+Reload the keystore on nodes in the cluster.
+
+Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable.
+That is, you can change them on disk and reload them without restarting any nodes in the cluster.
+When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.
+
+When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings.
+Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted.
+Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.
+```ts
+client.nodes.reloadSecureSettings({ ... })
+```
+
+### Arguments [_arguments_nodes.reload_secure_settings]
+
+#### Request (object) [_request_nodes.reload_secure_settings]
+- **`node_id` (Optional, string \| string[])**: The names of particular nodes in the cluster to target.
+- **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.nodes.stats [_nodes.stats]
+Get node statistics.
+Get statistics for nodes in a cluster.
+By default, all stats are returned. You can limit the returned information by using metrics.
+```ts
+client.nodes.stats({ ... })
+```
+
+### Arguments [_arguments_nodes.stats]
+
+#### Request (object) [_request_nodes.stats]
+- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information.
+- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics.
+- **`index_metric` (Optional, string \| string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified.
+- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
+- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics.
+- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics.
+- **`groups` (Optional, boolean)**: List of search groups to include in the search statistics.
+- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`types` (Optional, string[])**: A list of document types for the indexing index metric.
+- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory.
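+
+For example, a sketch of fetching JVM and operating system statistics for every node (the field paths follow the nodes stats response, but treat them as an assumption to verify against your version):
+
+```ts
+const stats = await client.nodes.stats({ metric: ['jvm', 'os'] })
+for (const [nodeId, node] of Object.entries(stats.nodes)) {
+  console.log(nodeId, node.jvm?.mem?.heap_used_in_bytes)
+}
+```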
+
+## client.nodes.usage [_nodes.usage]
+Get feature usage information.
+```ts
+client.nodes.usage({ ... })
+```
+
+### Arguments [_arguments_nodes.usage]
+
+#### Request (object) [_request_nodes.usage]
+- **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics.
+A list of the following options: `_all`, `rest_actions`.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.queryRules.deleteRule [_query_rules.delete_rule]
+Delete a query rule.
+Delete a query rule within a query ruleset.
+This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
+```ts
+client.queryRules.deleteRule({ ruleset_id, rule_id })
+```
+
+### Arguments [_arguments_query_rules.delete_rule]
+
+#### Request (object) [_request_query_rules.delete_rule]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete
+- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete
+
+## client.queryRules.deleteRuleset [_query_rules.delete_ruleset]
+Delete a query ruleset.
+Remove a query ruleset and its associated data.
+This is a destructive action that is not recoverable.
+```ts
+client.queryRules.deleteRuleset({ ruleset_id })
+```
+
+### Arguments [_arguments_query_rules.delete_ruleset]
+
+#### Request (object) [_request_query_rules.delete_ruleset]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete
+
+## client.queryRules.getRule [_query_rules.get_rule]
+Get a query rule.
+Get details about a query rule within a query ruleset.
+```ts
+client.queryRules.getRule({ ruleset_id, rule_id })
+```
+
+### Arguments [_arguments_query_rules.get_rule]
+
+#### Request (object) [_request_query_rules.get_rule]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve
+- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve
+
+## client.queryRules.getRuleset [_query_rules.get_ruleset]
+Get a query ruleset.
+Get details about a query ruleset.
+```ts
+client.queryRules.getRuleset({ ruleset_id })
+```
+
+### Arguments [_arguments_query_rules.get_ruleset]
+
+#### Request (object) [_request_query_rules.get_ruleset]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset
+
+## client.queryRules.listRulesets [_query_rules.list_rulesets]
+Get all query rulesets.
+Get summarized information about the query rulesets.
+```ts
+client.queryRules.listRulesets({ ... })
+```
+
+### Arguments [_arguments_query_rules.list_rulesets]
+
+#### Request (object) [_request_query_rules.list_rulesets]
+- **`from` (Optional, number)**: The offset from the first result to fetch.
+- **`size` (Optional, number)**: The maximum number of results to retrieve.
+
+## client.queryRules.putRule [_query_rules.put_rule]
+Create or update a query rule.
+Create or update a query rule within a query ruleset.
+
+IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule.
+It is advised to use one or the other in query rulesets, to avoid errors.
+Additionally, pinned queries have a maximum limit of 100 pinned hits.
+If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
+```ts
+client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions })
+```
+
+### Arguments [_arguments_query_rules.put_rule]
+
+#### Request (object) [_request_query_rules.put_rule]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated.
+- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated.
+- **`type` (Enum("pinned" \| "exclude"))**: The type of rule.
+- **`criteria` ({ type, metadata, values } \| { type, metadata, values }[])**: The criteria that must be met for the rule to be applied.
+If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied.
+- **`actions` ({ ids, docs })**: The actions to take when the rule is matched.
+The format of this action depends on the rule type.
+- **`priority` (Optional, number)**
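+
+A sketch of pinning two documents whenever the user's query is an exact match (the ruleset, rule, and document IDs are placeholders):
+
+```ts
+await client.queryRules.putRule({
+  ruleset_id: 'my-ruleset',
+  rule_id: 'promote-docs',
+  type: 'pinned',
+  criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
+  actions: { ids: ['id1', 'id2'] }
+})
+```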
+
+## client.queryRules.putRuleset [_query_rules.put_ruleset]
+Create or update a query ruleset.
+There is a limit of 100 rules per ruleset.
+This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.
+
+IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule.
+It is advised to use one or the other in query rulesets, to avoid errors.
+Additionally, pinned queries have a maximum limit of 100 pinned hits.
+If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
+```ts
+client.queryRules.putRuleset({ ruleset_id, rules })
+```
+
+### Arguments [_arguments_query_rules.put_ruleset]
+
+#### Request (object) [_request_query_rules.put_ruleset]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated.
+- **`rules` ({ rule_id, type, criteria, actions, priority } \| { rule_id, type, criteria, actions, priority }[])**
+
+## client.queryRules.test [_query_rules.test]
+Test a query ruleset.
+Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
+```ts
+client.queryRules.test({ ruleset_id, match_criteria })
+```
+
+### Arguments [_arguments_query_rules.test]
+
+#### Request (object) [_request_query_rules.test]
+- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated
+- **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset.
+Match criteria should match the keys defined in the `criteria.metadata` field of the rule.
+
+## client.rollup.deleteJob [_rollup.delete_job]
+Delete a rollup job.
+
+A job must be stopped before it can be deleted.
+If you attempt to delete a started job, an error occurs.
+Similarly, if you attempt to delete a nonexistent job, an exception occurs.
+
+IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data.
+The API does not delete any previously rolled up data.
+This is by design; a user may wish to roll up a static data set.
+Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data).
+Thus the job can be deleted, leaving behind the rolled up data for analysis.
+If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index.
+If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:
+
+```
+POST my_rollup_index/_delete_by_query
+{
+  "query": {
+    "term": {
+      "_rollup.id": "the_rollup_job_id"
+    }
+  }
+}
+```
+```ts
+client.rollup.deleteJob({ id })
+```
+
+### Arguments [_arguments_rollup.delete_job]
+
+#### Request (object) [_request_rollup.delete_job]
+- **`id` (string)**: Identifier for the job.
+
+## client.rollup.getJobs [_rollup.get_jobs]
+Get rollup job information.
+Get the configuration, stats, and status of rollup jobs.
+
+NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.
+If a job was created, ran for a while, then was deleted, the API does not return any details about it.
+For details about a historical rollup job, the rollup capabilities API may be more useful.
+```ts
+client.rollup.getJobs({ ... })
+```
+
+### Arguments [_arguments_rollup.get_jobs]
+
+#### Request (object) [_request_rollup.get_jobs]
+- **`id` (Optional, string)**: Identifier for the rollup job.
+If it is `_all` or omitted, the API returns all rollup jobs.
+
+## client.rollup.getRollupCaps [_rollup.get_rollup_caps]
+Get the rollup job capabilities.
+Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
+
+This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index.
+Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.
+This API enables you to inspect an index and determine:
+
+1. Does this index have associated rollup data somewhere in the cluster?
+2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
+```ts
+client.rollup.getRollupCaps({ ... })
+```
+
+### Arguments [_arguments_rollup.get_rollup_caps]
+
+#### Request (object) [_request_rollup.get_rollup_caps]
+- **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for.
+`_all` may be used to fetch rollup capabilities from all jobs.
+
+## client.rollup.getRollupIndexCaps [_rollup.get_rollup_index_caps]
+Get the rollup index capabilities.
+Get the rollup capabilities of all jobs inside of a rollup index.
+A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:
+
+* What jobs are stored in an index (or indices specified via a pattern)?
+* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
+```ts
+client.rollup.getRollupIndexCaps({ index })
+```
+
+### Arguments [_arguments_rollup.get_rollup_index_caps]
+
+#### Request (object) [_request_rollup.get_rollup_index_caps]
+- **`index` (string \| string[])**: Data stream or index to check for rollup capabilities.
+Wildcard (`*`) expressions are supported.
+
+## client.rollup.putJob [_rollup.put_job]
+Create a rollup job.
+
+WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.
+
+The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.
+
+There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.
+
+Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.
+```ts
+client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index })
+```
+
+### Arguments [_arguments_rollup.put_job]
+
+#### Request (object) [_request_rollup.put_job]
+- **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the
+data that is associated with the rollup job. The ID is persistent; it is stored with the rolled
+up data. If you create a job, let it run for a while, then delete the job, the data that the job
+rolled up is still associated with this job ID. You cannot create a new job with the same ID
+since that could lead to problems with mismatched job configurations.
+- **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval
+triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated
+to the time interval of the data being rolled up. For example, you may wish to create hourly rollups
+of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The
+cron pattern is defined just like a Watcher cron schedule.
+- **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be
+available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of
+the groups configuration as defining a set of tools that can later be used in aggregations to partition the
+data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide
+enough flexibility that you simply need to determine which fields are needed, not in what order they are needed.
+- **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to
+rollup the entire index or index-pattern.
+- **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends
+to execute faster, but requires more memory during processing. This value has no effect on how the data is
+rolled up; it is merely used for tweaking the speed or memory cost of the indexer.
+- **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.
+- **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each
+group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined
+on a per-field basis and for each field you configure which metric should be collected.
+- **`timeout` (Optional, string \| -1 \| 0)**: Time to wait for the request to complete.
+- **`headers` (Optional, Record)**
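+
+Putting those sections together, a sketch of a daily-scheduled job that rolls up hourly sensor data (index names, fields, and intervals are illustrative):
+
+```ts
+await client.rollup.putJob({
+  id: 'sensor-rollup', // placeholder ID
+  index_pattern: 'sensor-*',
+  rollup_index: 'sensor_rollup',
+  cron: '0 0 0 * * ?', // run the indexer once a day at midnight
+  page_size: 1000,
+  groups: {
+    date_histogram: { field: 'timestamp', fixed_interval: '1h' },
+    terms: { fields: ['node'] }
+  },
+  metrics: [{ field: 'temperature', metrics: ['min', 'max', 'avg'] }]
+})
+```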
+
+## client.rollup.rollupSearch [_rollup.rollup_search]
+Search rolled-up data.
+The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.
+It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.
+
+The request body supports a subset of features from the regular search API.
+The following functionality is not available:
+
+`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
+`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.
+
+**Searching both historical rollup and non-rollup data**
+
+The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data.
+This is done by simply adding the live indices to the URI. For example:
+
+```
+GET sensor-1,sensor_rollup/_rollup_search
+{
+  "size": 0,
+  "aggregations": {
+    "max_temperature": {
+      "max": {
+        "field": "temperature"
+      }
+    }
+  }
+}
+```
+
+The rollup search endpoint does two things when the search runs:
+
+* The original request is sent to the non-rollup index unaltered.
+* A rewritten version of the original request is sent to the rollup index.
+
+When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
+During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
+```ts
+client.rollup.rollupSearch({ index })
+```
+
+### Arguments [_arguments_rollup.rollup_search]
+
+#### Request (object) [_request_rollup.rollup_search]
+- **`index` (string \| string[])**: A list of data streams and indices used to limit the request.
+This parameter has the following rules:
+
+* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted.
+* Multiple non-rollup indices may be specified.
+* Only one rollup index may be specified. If more than one are supplied, an exception occurs.
+* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.
+- **`aggregations` (Optional, Record)**: Specifies aggregations.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations.
+- **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data.
+- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response.
+- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response.
+
+## client.rollup.startJob [_rollup.start_job]
+Start rollup jobs.
+If you try to start a job that does not exist, an exception occurs.
+If you try to start a job that is already started, nothing happens.
+```ts
+client.rollup.startJob({ id })
+```
+
+### Arguments [_arguments_rollup.start_job]
+
+#### Request (object) [_request_rollup.start_job]
+- **`id` (string)**: Identifier for the rollup job.
+
+## client.rollup.stopJob [_rollup.stop_job]
+Stop rollup jobs.
+If you try to stop a job that does not exist, an exception occurs.
+If you try to stop a job that is already stopped, nothing happens.
+
+Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped.
+This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example:
+
+```
+POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
+```
+The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed.
+If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.
+```ts
+client.rollup.stopJob({ id })
+```
+
+### Arguments [_arguments_rollup.stop_job]
+
+#### Request (object) [_request_rollup.stop_job]
+- **`id` (string)**: Identifier for the rollup job.
+- **`timeout` (Optional, string \| -1 \| 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop.
+If more than `timeout` time has passed, the API throws a timeout exception.
+NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED.
+The timeout simply means the API call itself timed out while waiting for the status change.
+- **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops.
+If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background.
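+
+The TypeScript equivalent of the REST example above, blocking until the job reaches STOPPED:
+
+```ts
+await client.rollup.stopJob({
+  id: 'sensor',
+  wait_for_completion: true,
+  timeout: '10s'
+})
+```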
+
+## client.searchApplication.delete [_search_application.delete]
+Delete a search application.
+
+Remove a search application and its associated alias. Indices attached to the search application are not removed.
+```ts
+client.searchApplication.delete({ name })
+```
+
+### Arguments [_arguments_search_application.delete]
+
+#### Request (object) [_request_search_application.delete]
+- **`name` (string)**: The name of the search application to delete.
+
+## client.searchApplication.deleteBehavioralAnalytics [_search_application.delete_behavioral_analytics]
+Delete a behavioral analytics collection.
+The associated data stream is also deleted.
+```ts
+client.searchApplication.deleteBehavioralAnalytics({ name })
+```
+
+### Arguments [_arguments_search_application.delete_behavioral_analytics]
+
+#### Request (object) [_request_search_application.delete_behavioral_analytics]
+- **`name` (string)**: The name of the analytics collection to be deleted
+
+## client.searchApplication.get [_search_application.get]
+Get search application details.
+```ts
+client.searchApplication.get({ name })
+```
+
+### Arguments [_arguments_search_application.get]
+
+#### Request (object) [_request_search_application.get]
+- **`name` (string)**: The name of the search application
+
+## client.searchApplication.getBehavioralAnalytics [_search_application.get_behavioral_analytics]
+Get behavioral analytics collections.
+```ts
+client.searchApplication.getBehavioralAnalytics({ ... })
+```
+
+### Arguments [_arguments_search_application.get_behavioral_analytics]
+
+#### Request (object) [_request_search_application.get_behavioral_analytics]
+- **`name` (Optional, string[])**: A list of analytics collections to limit the returned information
+
+## client.searchApplication.list [_search_application.list]
+Get search applications.
+Get information about search applications.
+```ts
+client.searchApplication.list({ ... })
+```
+
+### Arguments [_arguments_search_application.list]
+
+#### Request (object) [_request_search_application.list]
+- **`q` (Optional, string)**: Query in the Lucene query string syntax.
+- **`from` (Optional, number)**: Starting offset.
+- **`size` (Optional, number)**: Specifies a max number of results to get.
+
+## client.searchApplication.postBehavioralAnalyticsEvent [_search_application.post_behavioral_analytics_event]
+Create a behavioral analytics collection event.
+```ts
+client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type })
+```
+
+### Arguments [_arguments_search_application.post_behavioral_analytics_event]
+
+#### Request (object) [_request_search_application.post_behavioral_analytics_event]
+- **`collection_name` (string)**: The name of the behavioral analytics collection.
+- **`event_type` (Enum("page_view" \| "search" \| "search_click"))**: The analytics event type.
+- **`payload` (Optional, User-defined value)**
+- **`debug` (Optional, boolean)**: Whether the response should include more details.
+
+## client.searchApplication.put [_search_application.put]
+Create or update a search application.
+```ts
+client.searchApplication.put({ name })
+```
+
+### Arguments [_arguments_search_application.put]
+
+#### Request (object) [_request_search_application.put]
+- **`name` (string)**: The name of the search application to be created or updated.
+- **`search_application` (Optional, { indices, analytics_collection_name, template })**
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing search applications.
+
+## client.searchApplication.putBehavioralAnalytics [_search_application.put_behavioral_analytics]
+Create a behavioral analytics collection.
+```ts
+client.searchApplication.putBehavioralAnalytics({ name })
+```
+
+### Arguments [_arguments_search_application.put_behavioral_analytics]
+
+#### Request (object) [_request_search_application.put_behavioral_analytics]
+- **`name` (string)**: The name of the analytics collection to be created or updated.
+
+## client.searchApplication.renderQuery [_search_application.render_query]
+Render a search application query.
+Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
+If a parameter used in the search template is not specified in `params`, the parameter's default value will be used.
+The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.
+
+You must have `read` privileges on the backing alias of the search application.
+```ts
+client.searchApplication.renderQuery({ name })
+```
+
+### Arguments [_arguments_search_application.render_query]
+
+#### Request (object) [_request_search_application.render_query]
+- **`name` (string)**: The name of the search application to render the query for.
+- **`params` (Optional, Record)**
+
+## client.searchApplication.search [_search_application.search]
+Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template.
+Unspecified template parameters are assigned their default values if applicable.
+```ts
+client.searchApplication.search({ name })
+```
+
+### Arguments [_arguments_search_application.search]
+
+#### Request (object) [_request_search_application.search]
+- **`name` (string)**: The name of the search application to be searched.
+- **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template.
+- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response.
+
+## client.searchableSnapshots.cacheStats [_searchable_snapshots.cache_stats]
+Get cache statistics.
+Get statistics about the shared cache for partially mounted indices.
+```ts
+client.searchableSnapshots.cacheStats({ ... })
+```
+
+### Arguments [_arguments_searchable_snapshots.cache_stats]
+
+#### Request (object) [_request_searchable_snapshots.cache_stats]
+- **`node_id` (Optional, string \| string[])**: The names of the nodes in the cluster to target.
+- **`master_timeout` (Optional, string \| -1 \| 0)**
+
+## client.searchableSnapshots.clearCache [_searchable_snapshots.clear_cache]
+Clear the cache.
+Clear indices and data streams from the shared cache for partially mounted indices.
+```ts
+client.searchableSnapshots.clearCache({ ... })
+```
+
+### Arguments [_arguments_searchable_snapshots.clear_cache]
+
+#### Request (object) [_request_searchable_snapshots.clear_cache]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to clear from the cache.
+It supports wildcards (`*`).
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.)
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed).
+
+## client.searchableSnapshots.mount [_searchable_snapshots.mount]
+Mount a snapshot.
+Mount a snapshot as a searchable snapshot index.
+Do not use this API for snapshots managed by index lifecycle management (ILM).
+Manually mounting ILM-managed snapshots can interfere with ILM processes.
+```ts
+client.searchableSnapshots.mount({ repository, snapshot, index })
+```
+
+### Arguments [_arguments_searchable_snapshots.mount]
+
+#### Request (object) [_request_searchable_snapshots.mount]
+- **`repository` (string)**: The name of the repository containing the snapshot of the index to mount.
+- **`snapshot` (string)**: The name of the snapshot of the index to mount.
+- **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted.
+If no `renamed_index` is specified, this name will also be used to create the new index.
+- **`renamed_index` (Optional, string)**: The name of the index that will be created.
+- **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted.
+- **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete.
+- **`storage` (Optional, string)**: The mount option for the searchable snapshot index.
+
+## client.searchableSnapshots.stats [_searchable_snapshots.stats]
+Get searchable snapshot statistics.
+```ts
+client.searchableSnapshots.stats({ ... })
+```
+
+### Arguments [_arguments_searchable_snapshots.stats]
+
+#### Request (object) [_request_searchable_snapshots.stats]
+- **`index` (Optional, string \| string[])**: A list of data streams and indices to retrieve statistics for.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Return stats aggregated at the cluster, index, or shard level.
+
+## client.security.activateUserProfile [_security.activate_user_profile]
+Activate a user profile.
+
+Create or update a user profile on behalf of another user.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+This API creates or updates a profile document for end users with information that is extracted from the user's authentication object, including `username`, `full_name`, `roles`, and the authentication realm.
+For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.
+
+When updating a profile document, the API enables the document if it was disabled.
+Any updates do not change existing content for either the `labels` or `data` fields.
+```ts
+client.security.activateUserProfile({ grant_type })
+```
+
+### Arguments [_arguments_security.activate_user_profile]
+
+#### Request (object) [_request_security.activate_user_profile]
+- **`grant_type` (Enum("password" \| "access_token"))**: The type of grant.
+- **`access_token` (Optional, string)**: The user's Elasticsearch access token or JWT.
+Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration.
+If you specify the `access_token` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`password` (Optional, string)**: The user's password.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`username` (Optional, string)**: The username that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+
+## client.security.authenticate [_security.authenticate]
+Authenticate a user.
+
+Authenticates a user and returns information about the authenticated user.
+Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication).
+A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user.
+If the user cannot be authenticated, this API returns a 401 status code.
+```ts
+client.security.authenticate()
+```
+
+
+## client.security.bulkDeleteRole [_security.bulk_delete_role]
+Bulk delete roles.
+
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The bulk delete roles API cannot delete roles that are defined in roles files.
+```ts
+client.security.bulkDeleteRole({ names })
+```
+
+### Arguments [_arguments_security.bulk_delete_role]
+
+#### Request (object) [_request_security.bulk_delete_role]
+- **`names` (string[])**: An array of role names to delete.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
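+
+As a quick illustration, here is a minimal sketch (the role names are hypothetical) that deletes two native roles and waits for a refresh so the change is immediately visible to search:
+
+```ts
+// Assumes `client` is an already-configured Client instance.
+const response = await client.security.bulkDeleteRole({
+  names: ['my-app-reader', 'my-app-writer'], // hypothetical role names
+  refresh: 'wait_for' // wait for a refresh before returning
+})
+// The response lists which roles were deleted and reports any per-role errors.
+```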
+ +## client.security.bulkPutRole [_security.bulk_put_role] +Bulk create or update roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk create or update roles API cannot update roles that are defined in roles files. +```ts +client.security.bulkPutRole({ roles }) +``` + +### Arguments [_arguments_security.bulk_put_role] + +#### Request (object) [_request_security.bulk_put_role] +- **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.bulkUpdateApiKeys [_security.bulk_update_api_keys] +Bulk update API keys. +Update the attributes for multiple API keys. + +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. +```ts +client.security.bulkUpdateApiKeys({ ids }) +``` + +### Arguments [_arguments_security.bulk_update_api_keys] + +#### Request (object) [_request_security.bulk_update_api_keys] +- **`ids` (string \| string[])**: The API key identifiers. +- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. 
+The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter.
+The structure of a role descriptor is the same as the request for the create API keys API.
+
+## client.security.changePassword [_security.change_password]
+Change passwords.
+
+Change the passwords of users in the native realm and built-in users.
+```ts
+client.security.changePassword({ ... })
+```
+
+### Arguments [_arguments_security.change_password]
+
+#### Request (object) [_request_security.change_password]
+- **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this
+parameter, the password is changed for the current user.
+- **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long.
+- **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same
+hashing algorithm as has been configured for password storage. For more details,
+see the explanation of the `xpack.security.authc.password_hashing.algorithm`
+setting.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.clearApiKeyCache [_security.clear_api_key_cache]
+Clear the API key cache.
+
+Evict a subset of all entries from the API key cache.
+The cache is also automatically cleared on state changes of the security index.
+```ts
+client.security.clearApiKeyCache({ ids })
+```
+
+### Arguments [_arguments_security.clear_api_key_cache]
+
+#### Request (object) [_request_security.clear_api_key_cache]
+- **`ids` (string \| string[])**: List of API key IDs to evict from the API key cache.
+To evict all API keys, use `*`.
+Does not support other wildcard patterns.
+
+## client.security.clearCachedPrivileges [_security.clear_cached_privileges]
+Clear the privileges cache.
+
+Evict privileges from the native application privilege cache.
+The cache is also automatically cleared for applications that have their privileges updated.
+```ts
+client.security.clearCachedPrivileges({ application })
+```
+
+### Arguments [_arguments_security.clear_cached_privileges]
+
+#### Request (object) [_request_security.clear_cached_privileges]
+- **`application` (string)**: A list of applications.
+To clear all applications, use an asterisk (`*`).
+It does not support other wildcard patterns.
+
+## client.security.clearCachedRealms [_security.clear_cached_realms]
+Clear the user cache.
+
+Evict users from the user cache.
+You can completely clear the cache or evict specific users.
+
+User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request.
+There are realm settings that you can use to configure the user cache.
+For more information, refer to the documentation about controlling the user cache.
+```ts
+client.security.clearCachedRealms({ realms })
+```
+
+### Arguments [_arguments_security.clear_cached_realms]
+
+#### Request (object) [_request_security.clear_cached_realms]
+- **`realms` (string \| string[])**: A list of realms.
+To clear all realms, use an asterisk (`*`).
+It does not support other wildcard patterns.
+- **`usernames` (Optional, string[])**: A list of the users to clear from the cache.
+If you do not specify this parameter, the API evicts all users from the user cache.
+
+## client.security.clearCachedRoles [_security.clear_cached_roles]
+Clear the roles cache.
+
+Evict roles from the native role cache.
+```ts
+client.security.clearCachedRoles({ name })
+```
+
+### Arguments [_arguments_security.clear_cached_roles]
+
+#### Request (object) [_request_security.clear_cached_roles]
+- **`name` (string \| string[])**: A list of roles to evict from the role cache.
+To evict all roles, use an asterisk (`*`).
+It does not support other wildcard patterns.
+
+## client.security.clearCachedServiceTokens [_security.clear_cached_service_tokens]
+Clear service account token caches.
+
+Evict a subset of all entries from the service account token caches.
+Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index.
+This API clears matching entries from both caches.
+
+The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index.
+The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes.
+```ts
+client.security.clearCachedServiceTokens({ namespace, service, name })
+```
+
+### Arguments [_arguments_security.clear_cached_service_tokens]
+
+#### Request (object) [_request_security.clear_cached_service_tokens]
+- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts.
+- **`service` (string)**: The name of the service, which must be unique within its namespace.
+- **`name` (string \| string[])**: A list of token names to evict from the service account token caches.
+Use a wildcard (`*`) to evict all tokens that belong to a service account.
+It does not support other wildcard patterns.
+
+## client.security.createApiKey [_security.create_api_key]
+Create an API key.
+
+Create an API key for access without requiring basic authentication.
+
+IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.
+If you specify privileges, the API returns an error.
+
+A successful request returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.
+
+NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.
+
+The API keys are created by the Elasticsearch API key service, which is automatically enabled.
+To configure or turn off the API key service, refer to API key service setting documentation.
+```ts
+client.security.createApiKey({ ... })
+```
+
+### Arguments [_arguments_security.create_api_key]
+
+#### Request (object) [_request_security.create_api_key]
+- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key.
+By default, API keys never expire.
+- **`name` (Optional, string)**: A name for the API key.
+- **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key.
+When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user.
+If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions, thereby limiting the access scope for API keys.
+The structure of a role descriptor is the same as the request for the create role API.
+For more details, refer to the create or update roles API.
+
+NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges.
+In this case, you must explicitly specify a role descriptor with no privileges.
+The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.createCrossClusterApiKey [_security.create_cross_cluster_api_key]
+Create a cross-cluster API key.
+
+Create an API key of the `cross_cluster` type for API key-based remote cluster access.
+A `cross_cluster` API key cannot be used to authenticate through the REST interface.
+
+IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.
+
+Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.
+
+NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key's effective permission is exactly as specified with the `access` property.
+
+A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.
+
+By default, API keys never expire. You can specify expiration information when you create the API keys.
+
+Cross-cluster API keys can only be updated with the update cross-cluster API key API.
+Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
+```ts
+client.security.createCrossClusterApiKey({ access, name })
+```
+
+### Arguments [_arguments_security.create_cross_cluster_api_key]
+
+#### Request (object) [_request_security.create_cross_cluster_api_key]
+- **`access` ({ replication, search })**: The access to be granted to this API key.
+The access is composed of permissions for cross-cluster search and cross-cluster replication.
+At least one of them must be specified.
+
+NOTE: No explicit privileges should be specified for either search or replication access.
+The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly.
+- **`name` (string)**: Specifies the name for this API key.
+- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API key.
+By default, API keys never expire.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key.
+It supports a nested data structure.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+
+## client.security.createServiceToken [_security.create_service_token]
+Create a service account token.
+
+Create a service account token for access without requiring basic authentication.
+
+NOTE: Service account tokens never expire.
+You must actively delete them if they are no longer needed.
+```ts
+client.security.createServiceToken({ namespace, service })
+```
+
+### Arguments [_arguments_security.create_service_token]
+
+#### Request (object) [_request_security.create_service_token]
+- **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts.
+- **`service` (string)**: The name of the service.
+- **`name` (Optional, string)**: The name for the service account token.
+If omitted, a random name will be generated.
+
+Token names must be at least one and no more than 256 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.
+
+NOTE: Token names must be unique in the context of the associated service account.
+They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.delegatePki [_security.delegate_pki]
+Delegate PKI authentication.
+
+This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
+The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
+A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.
+
+This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.
+
+IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.
+This is part of the TLS authentication process and it is delegated to the proxy that calls this API.
+The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
+```ts
+client.security.delegatePki({ x509_certificate_chain })
+```
+
+### Arguments [_arguments_security.delegate_pki]
+
+#### Request (object) [_request_security.delegate_pki]
+- **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array.
+Each string in the array is the base64 encoding (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding.
+
+The first element is the target certificate that contains the subject distinguished name that is requesting access.
+This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.
+
+## client.security.deletePrivileges [_security.delete_privileges]
+Delete application privileges.
+
+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request. +```ts +client.security.deletePrivileges({ application, name }) +``` + +### Arguments [_arguments_security.delete_privileges] + +#### Request (object) [_request_security.delete_privileges] +- **`application` (string)**: The name of the application. +Application privileges are always associated with exactly one application. +- **`name` (string \| string[])**: The name of the privilege. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteRole [_security.delete_role] +Delete roles. + +Delete roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The delete roles API cannot remove roles that are defined in roles files. +```ts +client.security.deleteRole({ name }) +``` + +### Arguments [_arguments_security.delete_role] + +#### Request (object) [_request_security.delete_role] +- **`name` (string)**: The name of the role. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteRoleMapping [_security.delete_role_mapping] +Delete role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The delete role mappings API cannot remove role mappings that are defined in role mapping files. +```ts +client.security.deleteRoleMapping({ name }) +``` + +### Arguments [_arguments_security.delete_role_mapping] + +#### Request (object) [_request_security.delete_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteServiceToken [_security.delete_service_token] +Delete service account tokens. + +Delete service account tokens for a service in a specified namespace. +```ts +client.security.deleteServiceToken({ namespace, service, name }) +``` + +### Arguments [_arguments_security.delete_service_token] + +#### Request (object) [_request_security.delete_service_token] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The service name. +- **`name` (string)**: The name of the service account token. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
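+
+As a quick illustration, here is a minimal sketch (the token name is hypothetical; `elastic/fleet-server` is currently the only available service account) that deletes a single token and waits for a refresh:
+
+```ts
+// Assumes `client` is an already-configured Client instance.
+const response = await client.security.deleteServiceToken({
+  namespace: 'elastic',    // top-level grouping of service accounts
+  service: 'fleet-server', // the service within that namespace
+  name: 'my-token',        // hypothetical token name
+  refresh: 'wait_for'      // the default: wait for a refresh before returning
+})
+// `response.found` indicates whether the token existed and was removed.
+```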
+
+## client.security.deleteUser [_security.delete_user]
+Delete users.
+
+Delete users from the native realm.
+```ts
+client.security.deleteUser({ username })
+```
+
+### Arguments [_arguments_security.delete_user]
+
+#### Request (object) [_request_security.delete_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.disableUser [_security.disable_user]
+Disable users.
+
+Disable users in the native realm.
+By default, when you create users, they are enabled.
+You can use this API to revoke a user's access to Elasticsearch.
+```ts
+client.security.disableUser({ username })
+```
+
+### Arguments [_arguments_security.disable_user]
+
+#### Request (object) [_request_security.disable_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.disableUserProfile [_security.disable_user_profile]
+Disable a user profile.
+
+Disable user profiles so that they are not visible in user profile searches.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches.
+To re-enable a disabled user profile, use the enable user profile API.
+```ts
+client.security.disableUserProfile({ uid })
+```
+
+### Arguments [_arguments_security.disable_user_profile]
+
+#### Request (object) [_request_security.disable_user_profile]
+- **`uid` (string)**: Unique identifier for the user profile.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', it does nothing with refreshes.
+
+## client.security.enableUser [_security.enable_user]
+Enable users.
+
+Enable users in the native realm.
+By default, when you create users, they are enabled.
+```ts
+client.security.enableUser({ username })
+```
+
+### Arguments [_arguments_security.enable_user]
+
+#### Request (object) [_request_security.enable_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.enableUserProfile [_security.enable_user_profile]
+Enable a user profile.
+
+Enable user profiles to make them visible in user profile searches.
+ +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +When you activate a user profile, it's automatically enabled and visible in user profile searches. +If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. +```ts +client.security.enableUserProfile({ uid }) +``` + +### Arguments [_arguments_security.enable_user_profile] + +#### Request (object) [_request_security.enable_user_profile] +- **`uid` (string)**: A unique identifier for the user profile. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +## client.security.enrollKibana [_security.enroll_kibana] +Enroll Kibana. + +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. + +NOTE: This API is currently intended for internal use only by Kibana. +Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. +```ts +client.security.enrollKibana() +``` + + +## client.security.enrollNode [_security.enroll_node] +Enroll a node. + +Enroll a new node to allow it to join an existing cluster with security features enabled. + +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. +```ts +client.security.enrollNode() +``` + + +## client.security.getApiKey [_security.get_api_key] +Get API key information. + +Retrieves information for one or more API keys. +NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. +```ts +client.security.getApiKey({ ... }) +``` + +### Arguments [_arguments_security.get_api_key] + +#### Request (object) [_request_security.get_api_key] +- **`id` (Optional, string)**: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +- **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. 
+This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. +- **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. + +## client.security.getBuiltinPrivileges [_security.get_builtin_privileges] +Get builtin privileges. + +Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. +```ts +client.security.getBuiltinPrivileges() +``` + + +## client.security.getPrivileges [_security.get_privileges] +Get application privileges. + +To use this API, you must have one of the following privileges: + +* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. +```ts +client.security.getPrivileges({ ... }) +``` + +### Arguments [_arguments_security.get_privileges] + +#### Request (object) [_request_security.get_privileges] +- **`application` (Optional, string)**: The name of the application. +Application privileges are always associated with exactly one application. +If you do not specify this parameter, the API returns information about all privileges for all applications. +- **`name` (Optional, string \| string[])**: The name of the privilege. +If you do not specify this parameter, the API returns information about all privileges for the requested application. + +## client.security.getRole [_security.get_role] +Get roles. + +Get roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The get roles API cannot retrieve roles that are defined in roles files. +```ts +client.security.getRole({ ... }) +``` + +### Arguments [_arguments_security.get_role] + +#### Request (object) [_request_security.get_role] +- **`name` (Optional, string \| string[])**: The name of the role. +You can specify multiple roles as a list. +If you do not specify this parameter, the API returns information about all roles. + +## client.security.getRoleMapping [_security.get_role_mapping] +Get role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The get role mappings API cannot retrieve role mappings that are defined in role mapping files. +```ts +client.security.getRoleMapping({ ... }) +``` + +### Arguments [_arguments_security.get_role_mapping] + +#### Request (object) [_request_security.get_role_mapping] +- **`name` (Optional, string \| string[])**: The distinct name that identifies the role mapping. 
The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings.
+
+## client.security.getServiceAccounts [_security.get_service_accounts]
+Get service accounts.
+
+Get a list of service accounts that match the provided path parameters.
+
+NOTE: Currently, only the `elastic/fleet-server` service account is available.
+```ts
+client.security.getServiceAccounts({ ... })
+```
+
+### Arguments [_arguments_security.get_service_accounts]
+
+#### Request (object) [_request_security.get_service_accounts]
+- **`namespace` (Optional, string)**: The name of the namespace.
+Omit this parameter to retrieve information about all service accounts.
+If you omit this parameter, you must also omit the `service` parameter.
+- **`service` (Optional, string)**: The service name.
+Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`.
+
+## client.security.getServiceCredentials [_security.get_service_credentials]
+Get service account credentials.
+
+To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).
+
+The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.
+
+NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster.
+Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
+```ts
+client.security.getServiceCredentials({ namespace, service })
+```
+
+### Arguments [_arguments_security.get_service_credentials]
+
+#### Request (object) [_request_security.get_service_credentials]
+- **`namespace` (string)**: The name of the namespace.
+- **`service` (string)**: The service name.
+
+## client.security.getSettings [_security.get_settings]
+Get security index settings.
+
+Get the user-configurable settings for the security internal index (`.security` and associated indices).
+Only a subset of the index settings (those that are user-configurable) will be shown.
+This includes:
+
+* `index.auto_expand_replicas`
+* `index.number_of_replicas`
+```ts
+client.security.getSettings({ ... })
+```
+
+### Arguments [_arguments_security.get_settings]
+
+#### Request (object) [_request_security.get_settings]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.security.getToken [_security.get_token]
+Get a token.
+
+Create a bearer token for access without requiring basic authentication.
+The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.
+Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.
+When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.
+
+The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.
+
+A successful get token API call returns a JSON structure that contains the access token, the amount of time (in seconds) until the token expires, the type, and the scope, if available.
+
+The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used.
+That time period is defined by the `xpack.security.authc.token.timeout` setting.
+If you want to invalidate a token immediately, you can do so by using the invalidate token API.
+```ts
+client.security.getToken({ ... })
+```
+
+### Arguments [_arguments_security.get_token]
+
+#### Request (object) [_request_security.get_token]
+- **`grant_type` (Optional, Enum("password" \| "client_credentials" \| "_kerberos" \| "refresh_token"))**: The type of grant.
+Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`.
+- **`scope` (Optional, string)**: The scope of the token.
+Currently, tokens are only issued for a scope of FULL regardless of the value sent with the request.
+- **`password` (Optional, string)**: The user's password.
+If you specify the `password` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`kerberos_ticket` (Optional, string)**: The base64-encoded Kerberos ticket.
+If you specify the `_kerberos` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life.
+If you specify the `refresh_token` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`username` (Optional, string)**: The username that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+
+## client.security.getUser [_security.get_user]
+Get users.
+
+Get information about users in the native realm and built-in users.
+```ts
+client.security.getUser({ ... })
+```
+
+### Arguments [_arguments_security.get_user]
+
+#### Request (object) [_request_security.get_user]
+- **`username` (Optional, string \| string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users.
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.
+
+## client.security.getUserPrivileges [_security.get_user_privileges]
+Get user privileges.
+
+Get the security privileges for the logged-in user.
+All users can use this API, but only to determine their own privileges.
+To check the privileges of other users, you must use the run as feature.
+To check whether a user has a specific list of privileges, use the has privileges API.
+```ts
+client.security.getUserPrivileges()
+```
+
+
+## client.security.getUserProfile [_security.get_user_profile]
+Get a user profile.
+
+Get a user's profile using the unique profile ID.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+```ts
+client.security.getUserProfile({ uid })
+```
+
+### Arguments [_arguments_security.get_user_profile]
+
+#### Request (object) [_request_security.get_user_profile]
+- **`uid` (string \| string[])**: A unique identifier for the user profile.
+- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document.
+To return all content, use `data=*`.
+To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default, the API returns no `data` content.
+
+## client.security.grantApiKey [_security.grant_api_key]
+Grant an API key.
+
+Create an API key on behalf of another user.
+This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API.
+The caller must have authentication credentials for the user on whose behalf the API key will be created.
+It is not possible to use this API to create an API key without that user's credentials.
+The supported user authentication credential types are:
+
+* username and password
+* Elasticsearch access tokens
+* JWTs
+
+The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user.
+In this case, the API key will be created on behalf of the impersonated user.
+
+This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.
+The API keys are created by the Elasticsearch API key service, which is automatically enabled.
+
+A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.
+
+By default, API keys never expire. You can specify expiration information when you create the API keys.
+```ts
+client.security.grantApiKey({ api_key, grant_type })
+```
+
+### Arguments [_arguments_security.grant_api_key]
+
+#### Request (object) [_request_security.grant_api_key]
+- **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key.
+- **`grant_type` (Enum("access_token" \| "password"))**: The type of grant. Supported grant types are: `access_token`, `password`.
+- **`access_token` (Optional, string)**: The user's access token.
+If you specify the `access_token` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`username` (Optional, string)**: The user name that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`password` (Optional, string)**: The user's password.
+If you specify the `password` grant type, this parameter is required.
+It is not valid with other grant types.
+- **`run_as` (Optional, string)**: The name of the user to be impersonated.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation
+visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', nothing is done with refreshes.
+
+## client.security.hasPrivileges [_security.has_privileges]
+Check user privileges.
+
+Determine whether the specified user has a specified list of privileges.
+All users can use this API, but only to determine their own privileges.
+To check the privileges of other users, you must use the run as feature.
+```ts +client.security.hasPrivileges({ ... }) +``` + +### Arguments [_arguments_security.has_privileges] + +#### Request (object) [_request_security.has_privileges] +- **`user` (Optional, string)**: Username +- **`application` (Optional, { application, privileges, resources }[])** +- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. +- **`index` (Optional, { names, privileges, allow_restricted_indices }[])** + +## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] +Check user profile privileges. + +Determine whether the users associated with the specified user profile IDs have all the requested privileges. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. +```ts +client.security.hasPrivilegesUserProfile({ uids, privileges }) +``` + +### Arguments [_arguments_security.has_privileges_user_profile] + +#### Request (object) [_request_security.has_privileges_user_profile] +- **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. +- **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. + +## client.security.invalidateApiKey [_security.invalidate_api_key] +Invalidate API keys. + +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. + +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. +The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. 
+The `manage_own_api_key` privilege only allows deleting REST API keys that are owned by the user.
+In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of three formats:
+
+- Set the parameter `owner=true`.
+- Or, set both `username` and `realm_name` to match the user's identity.
+- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.
+```ts
+client.security.invalidateApiKey({ ... })
+```
+
+### Arguments [_arguments_security.invalidate_api_key]
+
+#### Request (object) [_request_security.invalidate_api_key]
+- **`id` (Optional, string)**
+- **`ids` (Optional, string[])**: A list of API key ids.
+This parameter cannot be used with any of `name`, `realm_name`, or `username`.
+- **`name` (Optional, string)**: An API key name.
+This parameter cannot be used with any of `ids`, `realm_name` or `username`.
+- **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user.
+The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.
+
+NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.
+- **`realm_name` (Optional, string)**: The name of an authentication realm.
+This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.
+- **`username` (Optional, string)**: The username of a user.
+This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`.
+
+## client.security.invalidateToken [_security.invalidate_token]
+Invalidate a token.
+
+The access tokens returned by the get token API have a finite period of time for which they are valid.
+After that time period, they can no longer be used.
+The time period is defined by the `xpack.security.authc.token.timeout` setting.
+
+The refresh tokens returned by the get token API are only valid for 24 hours.
+They can also be used exactly once.
+If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.
+
+NOTE: While all parameters are optional, at least one of them is required.
+More specifically, either the `token` or the `refresh_token` parameter is required.
+If neither of these two is specified, then `realm_name` and/or `username` need to be specified.
+```ts
+client.security.invalidateToken({ ... })
+```
+
+### Arguments [_arguments_security.invalidate_token]
+
+#### Request (object) [_request_security.invalidate_token]
+- **`token` (Optional, string)**: An access token.
+This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used.
+- **`refresh_token` (Optional, string)**: A refresh token.
+This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
+- **`realm_name` (Optional, string)**: The name of an authentication realm.
+This parameter cannot be used with either `refresh_token` or `token`.
+- **`username` (Optional, string)**: The username of a user.
+This parameter cannot be used with either `refresh_token` or `token`.
+
+## client.security.oidcAuthenticate [_security.oidc_authenticate]
+Authenticate OpenID Connect.
+
+Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+```ts
+client.security.oidcAuthenticate({ nonce, redirect_uri, state })
+```
+
+### Arguments [_arguments_security.oidc_authenticate]
+
+#### Request (object) [_request_security.oidc_authenticate]
+- **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks.
+This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
+- **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication.
+This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider.
+- **`state` (string)**: Maintain state between the authentication request and the response.
+This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
+- **`realm` (Optional, string)**: The name of the OpenID Connect realm.
+This property is useful in cases where multiple realms are defined.
+
+## client.security.oidcLogout [_security.oidc_logout]
+Logout of OpenID Connect.
+
+Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API.
+
+If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+```ts
+client.security.oidcLogout({ token })
+```
+
+### Arguments [_arguments_security.oidc_logout]
+
+#### Request (object) [_request_security.oidc_logout]
+- **`token` (string)**: The access token to be invalidated.
+- **`refresh_token` (Optional, string)**: The refresh token to be invalidated.
+
+## client.security.oidcPrepareAuthentication [_security.oidc_prepare_authentication]
+Prepare OpenID Connect authentication.
+
+Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.
+
+The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+```ts
+client.security.oidcPrepareAuthentication({ ... })
+```
+
+### Arguments [_arguments_security.oidc_prepare_authentication]
+
+#### Request (object) [_request_security.oidc_prepare_authentication]
+- **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to.
+It cannot be specified when *realm* is specified.
+One of *realm* or *iss* is required.
+- **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter.
+This parameter is not valid when *realm* is specified.
+- **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks.
+If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
+- **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request.
+It cannot be specified when *iss* is specified.
+One of *realm* or *iss* is required.
+- **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation.
+If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
+
+## client.security.putPrivileges [_security.put_privileges]
+Create or update application privileges.
+
+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
+Application names consist of a prefix and an optional suffix, which must conform to the following rules:
+
+* The prefix must begin with a lowercase ASCII letter.
+* The prefix must contain only ASCII letters or digits.
+* The prefix must be at least 3 characters long.
+* If the suffix exists, it must begin with either a dash (`-`) or an underscore (`_`).
+* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`.
+* No part of the name can contain whitespace.
+
+Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.
+
+Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.
+```ts
+client.security.putPrivileges({ ... })
+```
+
+### Arguments [_arguments_security.put_privileges]
+
+#### Request (object) [_request_security.put_privileges]
+- **`privileges` (Optional, Record)**
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default), then refresh the affected shards to make this operation visible to search; if `wait_for`, then wait for a refresh to make this operation visible to search; if `false`, then do nothing with refreshes.
+
+## client.security.putRole [_security.put_role]
+Create or update roles.
+
+The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.
+The create or update roles API cannot update roles that are defined in roles files.
+File-based role management is not available in Elastic Serverless.
+```ts +client.security.putRole({ name }) +``` + +### Arguments [_arguments_security.put_role] + +#### Request (object) [_request_security.put_role] +- **`name` (string)**: The name of the role. +- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. +- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +- **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. +- **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. +- **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. + +NOTE: Remote indices are effective for remote clusters configured with the API key based model. +They have no effect for remote clusters configured with the certificate based model. +- **`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries. +- **`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. +- **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. +- **`description` (Optional, string)**: Optional description of the role descriptor +- **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. 
If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default), then refresh the affected shards to make this operation visible to search; if `wait_for`, then wait for a refresh to make this operation visible to search; if `false`, then do nothing with refreshes.
+
+## client.security.putRoleMapping [_security.put_role_mapping]
+Create or update role mappings.
+
+Role mappings define which roles are assigned to each user.
+Each mapping has rules that identify users and a list of roles that are granted to those users.
+The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.
+
+NOTE: This API does not create roles. Rather, it maps users to existing roles.
+Roles can be created by using the create or update roles API or roles files.
+
+**Role templates**
+
+The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.
+For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.
+The `roles` field is used for this purpose.
+
+For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.
+The `role_templates` field is used for this purpose.
+
+NOTE: To use role templates successfully, the relevant scripting feature must be enabled.
+Otherwise, all attempts to create a role mapping with role templates fail.
+
+All of the user fields that are available in the role mapping rules are also available in the role templates.
+Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.
+
+By default, a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.
+If the format of the template is set to "json", then the template is expected to produce a JSON string or an array of JSON strings for the role names.
+```ts
+client.security.putRoleMapping({ name })
+```
+
+### Arguments [_arguments_security.put_role_mapping]
+
+#### Request (object) [_request_security.put_role_mapping]
+- **`name` (string)**: The distinct name that identifies the role mapping.
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
+- **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed.
+- **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+- **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+- **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+- **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping.
+A rule is a logical condition that is expressed by using a JSON DSL.
+- **`run_as` (Optional, string[])**
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default), then refresh the affected shards to make this operation visible to search; if `wait_for`, then wait for a refresh to make this operation visible to search; if `false`, then do nothing with refreshes.
+
+## client.security.putUser [_security.put_user]
+Create or update users.
+
+Add and update users in the native realm.
+A password is required for adding a new user but is optional when updating an existing user.
+To change a user's password without updating any other fields, use the change password API.
+```ts
+client.security.putUser({ username })
+```
+
+### Arguments [_arguments_security.put_user]
+
+#### Request (object) [_request_security.put_user]
+- **`username` (string)**: An identifier for the user.
+
+NOTE: Usernames must be at least 1 and no more than 507 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block.
+Leading or trailing whitespace is not allowed.
+- **`email` (Optional, string \| null)**: The email of the user.
+- **`full_name` (Optional, string \| null)**: The full name of the user.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user.
+- **`password` (Optional, string)**: The user's password.
+Passwords must be at least 6 characters long.
+When adding a user, one of `password` or `password_hash` is required.
+When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password.
+- **`password_hash` (Optional, string)**: A hash of the user's password.
+This must be produced using the same hashing algorithm as has been configured for password storage.
+For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation.
+Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons.
+The `password` parameter and the `password_hash` parameter cannot be used in the same request.
+- **`roles` (Optional, string[])**: A set of roles the user has.
+The roles determine the user's access permissions.
+To create a user without any roles, specify an empty list (`[]`).
+- **`enabled` (Optional, boolean)**: Specifies whether the user is enabled.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: Valid values are `true`, `false`, and `wait_for`.
+These values have the same meaning as in the index API, but the default value for this API is `true`.
+
+## client.security.queryApiKeys [_security.query_api_keys]
+Find API keys with a query.
+
+Get a paginated list of API keys and their information.
+You can optionally filter the results with a query.
+
+To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.
+If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.
+If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.
+```ts
+client.security.queryApiKeys({ ... })
+```
+
+### Arguments [_arguments_security.query_api_keys]
+
+#### Request (object) [_request_security.query_api_keys]
+- **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys.
+Aggregations and queries work together. Aggregations are computed only on the API keys that match the query.
+This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`,
+`cardinality`, `value_count`, `composite`, `filter`, and `filters`.
+Additionally, aggregations only run over the same subset of fields that the query works with.
+- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following public information associated with an API key: `id`, `type`, `name`,
+`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`.
+
+NOTE: The queryable string values associated with API keys are internally mapped as keywords.
+Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value.
+Such a match query is hence equivalent to a `term` query.
+- **`from` (Optional, number)**: The starting document offset.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition.
+Other than `id`, all public fields of an API key are eligible for sorting.
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+- **`size` (Optional, number)**: The number of hits to return.
+It must not be negative.
+The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: The search after definition.
+- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key.
+An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it).
+An API key cannot retrieve any API key's limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges.
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal.
+If it exists, the profile UID is returned under the `profile_uid` response field for each API key.
+- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response.
+
+## client.security.queryRole [_security.query_role]
+Find roles with a query.
+
+Get roles in a paginated manner.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The query roles API does not retrieve roles that are defined in roles files, nor built-in ones.
+You can optionally filter the results with a query.
+Also, the results can be paginated and sorted.
+```ts
+client.security.queryRole({ ... })
+```
+
+### Arguments [_arguments_security.query_role]
+
+#### Request (object) [_request_security.query_role]
+- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following information associated with roles: `name`, `description`, `metadata`,
+`applications.application`, `applications.privileges`, and `applications.resources`.
+- **`from` (Optional, number)**: The starting document offset.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition.
+You can sort on `username`, `roles`, or `enabled`.
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+- **`size` (Optional, number)**: The number of hits to return.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: The search after definition.
+
+## client.security.queryUser [_security.query_user]
+Find users with a query.
+
+Get information for users in a paginated manner.
+You can optionally filter the results with a query.
+
+NOTE: As opposed to the get user API, built-in users are excluded from the result.
+This API is only for native users.
+```ts
+client.security.queryUser({ ... })
+```
+
+### Arguments [_arguments_security.query_user]
+
+#### Request (object) [_request_security.query_user]
+- **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return.
+If the query parameter is missing, it is equivalent to a `match_all` query.
+The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`,
+`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+You can query the following information associated with users: `username`, `roles`, `enabled`, `full_name`, and `email`.
+- **`from` (Optional, number)**: The starting document offset.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition.
+Fields eligible for sorting are: `username`, `roles`, `enabled`.
+In addition, sort can also be applied to the `_doc` field to sort by index order.
+- **`size` (Optional, number)**: The number of hits to return.
+It must not be negative.
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+To page through more hits, use the `search_after` parameter.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: The search after definition.
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.
+
+## client.security.samlAuthenticate [_security.saml_authenticate]
+Authenticate SAML.
+
+Submit a SAML response message to Elasticsearch for consumption.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The SAML message that is submitted can be:
+
+* A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
+* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.
+
+In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`.
+
+After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
+```ts
+client.security.samlAuthenticate({ content, ids })
+```
+
+### Arguments [_arguments_security.saml_authenticate]
+
+#### Request (object) [_request_security.saml_authenticate]
+- **`content` (string)**: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document.
+- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
+- **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.
+
+## client.security.samlCompleteLogout [_security.saml_complete_logout]
+Logout of SAML completely.
+
+Verify the logout response sent from the SAML IdP.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout.
+This API verifies the response by ensuring the content is relevant and validating its signature.
+An empty response is returned if the verification process is successful.
+The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding.
+The caller of this API must prepare the request accordingly so that this API can handle either of them.
+```ts
+client.security.samlCompleteLogout({ realm, ids })
+```
+
+### Arguments [_arguments_security.saml_complete_logout]
+
+#### Request (object) [_request_security.saml_complete_logout]
+- **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response.
+- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
+- **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI.
+- **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.
+
+## client.security.samlInvalidate [_security.saml_invalidate]
+Invalidate SAML.
+
+Submit a SAML LogoutRequest message to Elasticsearch for consumption.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+The logout request comes from the SAML IdP during an IdP initiated Single Logout.
+The custom web application can use this API to have Elasticsearch process the `LogoutRequest`.
+After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message.
+Thus the user can be redirected back to their IdP.
+```ts
+client.security.samlInvalidate({ query_string })
+```
+
+### Arguments [_arguments_security.saml_invalidate]
+
+#### Request (object) [_request_security.saml_invalidate]
+- **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.
+This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded.
+If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself.
+In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser.
+The client application must not attempt to parse or process the string in any way.
+- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
+- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch, the configuration of which should be used. You must specify either this parameter or the `acs` parameter.
+
+## client.security.samlLogout [_security.saml_logout]
+Logout of SAML.
+
+Submit a request to invalidate an access token and refresh token.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API invalidates the tokens that were generated for a user by the SAML authenticate API.
+If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
+```ts
+client.security.samlLogout({ token })
+```
+
+### Arguments [_arguments_security.saml_logout]
+
+#### Request (object) [_request_security.saml_logout]
+- **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
+- **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent refresh token that was received after refreshing the original access token.
+
+## client.security.samlPrepareAuthentication [_security.saml_prepare_authentication]
+Prepare SAML authentication.
+
+Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API returns a URL pointing to the SAML Identity Provider.
+You can use the URL to redirect the browser of the user in order to continue the authentication process.
+The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.
+If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+These parameters contain the algorithm used for the signature and the signature value itself.
+It also returns a random string that uniquely identifies this SAML Authentication request.
+The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
+```ts
+client.security.samlPrepareAuthentication({ ... })
+```
+
+### Arguments [_arguments_security.saml_prepare_authentication]
+
+#### Request (object) [_request_security.saml_prepare_authentication]
+- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch.
+The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter.
+- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.
+You must specify either this parameter or the `acs` parameter.
+- **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter.
+If the Authentication Request is signed, this value is used as part of the signature computation.
+
+## client.security.samlServiceProviderMetadata [_security.saml_service_provider_metadata]
+Create SAML service provider metadata.
+
+Generate SAML metadata for a SAML 2.0 Service Provider.
+
+The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file.
+This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
+```ts
+client.security.samlServiceProviderMetadata({ realm_name })
+```
+
+### Arguments [_arguments_security.saml_service_provider_metadata]
+
+#### Request (object) [_request_security.saml_service_provider_metadata]
+- **`realm_name` (string)**: The name of the SAML realm in Elasticsearch.
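+
+For reference, the SAML APIs above can be chained into a complete SP-initiated flow. The following is a minimal sketch, not a production implementation: the realm name `saml1` and the node URL are hypothetical, the browser redirect handling is application-specific, and error handling is omitted.
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // hypothetical node URL
+
+// Step 1: build the authentication request and keep its ID for later correlation.
+const prepared = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
+// Redirect the user's browser to `prepared.redirect` and persist `prepared.id`.
+
+// Step 2: after the IdP posts back, exchange the SAML response for tokens.
+const samlResponse = '...' // placeholder: Base64-encoded <Response> document from the browser
+const auth = await client.security.samlAuthenticate({
+  content: samlResponse,
+  ids: [prepared.id],
+  realm: 'saml1'
+})
+// `auth.access_token` and `auth.refresh_token` can now be used for authentication.
+```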
+
+## client.security.suggestUserProfiles [_security.suggest_user_profiles]
+Suggest a user profile.
+
+Get suggestions for user profiles that match specified search criteria.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+```ts
+client.security.suggestUserProfiles({ ... })
+```
+
+### Arguments [_arguments_security.suggest_user_profiles]
+
+#### Request (object) [_request_security.suggest_user_profiles]
+- **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents.
+Name-related fields are the user's `username`, `full_name`, and `email`.
+- **`size` (Optional, number)**: The number of profiles to return.
+- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document.
+To return all content, use `data=*`.
+To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`.
+By default, the API returns no `data` content.
+It is an error to specify `data` as both the query parameter and the request body field.
+- **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result.
+Profiles matching the specified hint are ranked higher in the response.
+Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.
+
+## client.security.updateApiKey [_security.update_api_key]
+Update an API key.
+
+Update attributes of an existing API key.
+This API supports updates to an API key's access scope, expiration, and metadata.
+
+To use this API, you must have at least the `manage_own_api_key` cluster privilege.
+Users can only update API keys that they created or that were granted to them.
+To update another user's API key, use the `run_as` feature to submit a request on behalf of another user.
+
+IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user's credentials are required.
+
+Use this API to update API keys created by the create API key or grant API key APIs.
+If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.
+It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.
+
+The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.
+The snapshot of the owner's permissions is updated automatically on every call.
+
+IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.
+This change can occur if the owner user's permissions have changed since the API key was created or last modified.
+```ts
+client.security.updateApiKey({ id })
+```
+
+### Arguments [_arguments_security.update_api_key]
+
+#### Request (object) [_request_security.update_api_key]
+- **`id` (string)**: The ID of the API key to update.
+- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key.
+The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user.
+You can assign new privileges by specifying them in this parameter.
+To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`.
+If an API key has no assigned privileges, it inherits the owner user's full permissions.
+The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not.
+The structure of a role descriptor is the same as the request for the create API keys API.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key.
+It supports a nested data structure.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+When specified, this value fully replaces the metadata previously associated with the API key.
+- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key.
+By default, API keys never expire.
+This property can be omitted to leave the expiration unchanged.
+
+## client.security.updateCrossClusterApiKey [_security.update_cross_cluster_api_key]
+Update a cross-cluster API key.
+
+Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.
+
+To use this API, you must have at least the `manage_security` cluster privilege.
+Users can only update API keys that they created.
+To update another user's API key, use the `run_as` feature to submit a request on behalf of another user.
+
+IMPORTANT: It's not possible to use an API key as the authentication credential for this API.
+To update an API key, the owner user's credentials are required.
+
+It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.
+
+This API supports updates to an API key's access scope, metadata, and expiration.
+The owner user's information, such as the `username` and `realm`, is also updated automatically on every call.
+
+NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
+```ts
+client.security.updateCrossClusterApiKey({ id, access })
+```
+
+### Arguments [_arguments_security.update_cross_cluster_api_key]
+
+#### Request (object) [_request_security.update_cross_cluster_api_key]
+- **`id` (string)**: The ID of the cross-cluster API key to update.
+- **`access` ({ replication, search })**: The access to be granted to this API key.
+The access is composed of permissions for cross cluster search and cross cluster replication.
+At least one of them must be specified.
+When specified, the new access assignment fully replaces the previously assigned access.
+- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key.
+By default, API keys never expire. This property can be omitted to leave the value unchanged.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key.
+It supports a nested data structure.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+When specified, this information fully replaces metadata previously associated with the API key.
+
+## client.security.updateSettings [_security.update_settings]
+Update security index settings.
+
+Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`.
+ +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. + +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. +```ts +client.security.updateSettings({ ... }) +``` + +### Arguments [_arguments_security.update_settings] + +#### Request (object) [_request_security.update_settings] +- **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +- **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. +- **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.security.updateUserProfileData [_security.update_user_profile_data] +Update user profile data. + +Update specific data for the user profile that is associated with a unique ID. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +To use this API, you must have one of the following privileges: + +* The `manage_user_profile` cluster privilege. +* The `update_profile_data` global privilege for the namespaces that are referenced in the request. + +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. + +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. +```ts +client.security.updateUserProfileData({ uid }) +``` + +### Arguments [_arguments_security.update_user_profile_data] + +#### Request (object) [_request_security.update_user_profile_data] +- **`uid` (string)**: A unique identifier for the user profile. +- **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +- **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. 
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation
+visible to search.
+If `wait_for`, it waits for a refresh to make this operation visible to search.
+If `false`, nothing is done with refreshes.
+
+## client.shutdown.deleteNode [_shutdown.delete_node]
+Cancel node shutdown preparations.
+Remove a node from the shutdown list so it can resume normal operations.
+You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
+Shutdown requests are never removed automatically by Elasticsearch.
+
+NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
+Direct use is not supported.
+
+If the operator privileges feature is enabled, you must be an operator to use this API.
+```ts
+client.shutdown.deleteNode({ node_id })
+```
+
+### Arguments [_arguments_shutdown.delete_node]
+
+#### Request (object) [_request_shutdown.delete_node]
+- **`node_id` (string)**: The ID of the node to be removed from the shutdown state.
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.shutdown.getNode [_shutdown.get_node]
+Get the shutdown status.
+
+Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
+The API returns status information for each part of the shut down process.
+
+NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+If the operator privileges feature is enabled, you must be an operator to use this API.
+```ts
+client.shutdown.getNode({ ... })
+```
+
+### Arguments [_arguments_shutdown.get_node]
+
+#### Request (object) [_request_shutdown.get_node]
+- **`node_id` (Optional, string \| string[])**: The node ID or IDs for which to retrieve the shutdown status.
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.shutdown.putNode [_shutdown.put_node]
+Prepare a node to be shut down.
+
+NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.
+
+If the operator privileges feature is enabled, you must be an operator to use this API.
+
+The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.
+This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.
+
+You must specify the type of shutdown: `restart`, `remove`, or `replace`.
+If a node is already being prepared for shutdown, you can use this API to change the shutdown type.
+
+IMPORTANT: This API does NOT terminate the Elasticsearch process.
+Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
+```ts
+client.shutdown.putNode({ node_id, type, reason })
+```
+
+### Arguments [_arguments_shutdown.put_node]
+
+#### Request (object) [_request_shutdown.put_node]
+- **`node_id` (string)**: The node identifier.
+This parameter is not validated against the cluster's active nodes.
+This enables you to register a node for shut down while it is offline.
+No error is thrown if you specify an invalid node ID.
+- **`type` (Enum("restart" \| "remove" \| "replace"))**: Valid values are `restart`, `remove`, or `replace`.
+Use `restart` when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
+Because the node is expected to rejoin the cluster, data is not migrated off of the node.
+Use `remove` when you need to permanently remove a node from the cluster.
+The node is not marked ready for shutdown until data is migrated off of the node.
+Use `replace` to do a 1:1 replacement of a node with another node.
+Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
+During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
+- **`reason` (string)**: A human-readable reason that the node is being shut down.
+This field provides information for other cluster operators; it does not affect the shut down process.
+- **`allocation_delay` (Optional, string)**: Only valid if `type` is `restart`.
+Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes.
+This works the same as delaying allocation with the `index.unassigned.node_left.delayed_timeout` setting.
+If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.
+- **`target_node_name` (Optional, string)**: Only valid if `type` is `replace`.
+Specifies the name of the node that is replacing the node being shut down.
+Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.
+During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.
+- **`master_timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.simulate.ingest [_simulate.ingest]
+Simulate data ingestion.
+Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
+
+This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.
+
+The API runs the default and final pipeline for that index against a set of documents provided in the body of the request.
+If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, just as a non-simulated ingest would.
+No data is indexed into Elasticsearch.
+Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.
+The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.
+
+This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.
+The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.
+
+By default, the pipeline definitions that are currently in the system are used.
+However, you can supply substitute pipeline definitions in the body of the request.
+These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
+```ts
+client.simulate.ingest({ docs })
+```
+
+### Arguments [_arguments_simulate.ingest]
+
+#### Request (object) [_request_simulate.ingest]
+- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline.
+- **`index` (Optional, string)**: The index to simulate ingesting into.
+This value can be overridden by specifying an index on each document.
+If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument.
+- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects.
+- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects.
+- **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**
+- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test.
+If you don't specify the `pipeline` request path parameter, this parameter is required.
+If you specify both this and the request path parameter, the API only uses the request path parameter.
+- **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline.
+This value can be used to override the default pipeline of the index.
+
+## client.slm.deleteLifecycle [_slm.delete_lifecycle]
+Delete a policy.
+Delete a snapshot lifecycle policy definition.
+This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.
+```ts
+client.slm.deleteLifecycle({ policy_id })
+```
+
+### Arguments [_arguments_slm.delete_lifecycle]
+
+#### Request (object) [_request_slm.delete_lifecycle]
+- **`policy_id` (string)**: The ID of the snapshot lifecycle policy to remove.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.slm.executeLifecycle [_slm.execute_lifecycle]
+Run a policy.
+Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time.
+The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
+```ts
+client.slm.executeLifecycle({ policy_id })
+```
+
+### Arguments [_arguments_slm.execute_lifecycle]
+
+#### Request (object) [_request_slm.execute_lifecycle]
+- **`policy_id` (string)**: The ID of the snapshot lifecycle policy to be executed.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.slm.executeRetention [_slm.execute_retention]
+Run a retention policy.
+Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules.
+The retention policy is normally applied according to its schedule.
+```ts
+client.slm.executeRetention({ ... })
+```
+
+### Arguments [_arguments_slm.execute_retention]
+
+#### Request (object) [_request_slm.execute_retention]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.slm.getLifecycle [_slm.get_lifecycle]
+Get policy information.
+Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
+```ts
+client.slm.getLifecycle({ ... })
+```
+
+### Arguments [_arguments_slm.get_lifecycle]
+
+#### Request (object) [_request_slm.get_lifecycle]
+- **`policy_id` (Optional, string \| string[])**: A list of snapshot lifecycle policy IDs to retrieve.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.slm.getStats [_slm.get_stats]
+Get snapshot lifecycle management statistics.
+Get global and policy-level statistics about actions taken by snapshot lifecycle management.
+```ts
+client.slm.getStats({ ... })
+```
+
+### Arguments [_arguments_slm.get_stats]
+
+#### Request (object) [_request_slm.get_stats]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
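+
+For example, the lifecycle APIs above can be combined to trigger an out-of-schedule snapshot. The following is a minimal sketch under assumed names: the policy ID `nightly-snapshots` and the node URL are hypothetical.
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // hypothetical node URL
+
+async function runPolicyNow (policyId: string): Promise<string> {
+  // Confirm the policy exists; the get lifecycle response is keyed by policy ID.
+  const policies = await client.slm.getLifecycle({ policy_id: policyId })
+  if (policies[policyId] == null) {
+    throw new Error(`SLM policy "${policyId}" does not exist`)
+  }
+  // Take a snapshot immediately instead of waiting for the schedule.
+  const { snapshot_name } = await client.slm.executeLifecycle({ policy_id: policyId })
+  return snapshot_name
+}
+
+console.log(await runPolicyNow('nightly-snapshots'))
+```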
+
+## client.slm.getStatus [_slm.get_status]
+Get the snapshot lifecycle management status.
+```ts
+client.slm.getStatus({ ... })
+```
+
+### Arguments [_arguments_slm.get_status]
+
+#### Request (object) [_request_slm.get_status]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.slm.putLifecycle [_slm.put_lifecycle]
+Create or update a policy.
+Create or update a snapshot lifecycle policy.
+If the policy already exists, this request increments the policy version.
+Only the latest version of a policy is stored.
+```ts
+client.slm.putLifecycle({ policy_id })
+```
+
+### Arguments [_arguments_slm.put_lifecycle]
+
+#### Request (object) [_request_slm.put_lifecycle]
+- **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update.
+- **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy.
+- **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name.
+- **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API.
+- **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy.
+- **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.slm.start [_slm.start]
+Start snapshot lifecycle management.
+Snapshot lifecycle management (SLM) starts automatically when a cluster is formed.
+Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
+```ts
+client.slm.start({ ... })
+```
+
+### Arguments [_arguments_slm.start]
+
+#### Request (object) [_request_slm.start]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.slm.stop [_slm.stop]
+Stop snapshot lifecycle management.
+Stop all snapshot lifecycle management (SLM) operations and the SLM plugin.
+This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices.
+Stopping SLM does not stop any snapshots that are in progress.
+You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
+
+The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped.
+Use the get snapshot lifecycle management status API to see if SLM is running.
+```ts
+client.slm.stop({ ... })
+```
+
+### Arguments [_arguments_slm.stop]
+
+#### Request (object) [_request_slm.stop]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.snapshot.cleanupRepository [_snapshot.cleanup_repository]
+Clean up the snapshot repository.
+Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
+```ts
+client.snapshot.cleanupRepository({ repository })
+```
+
+### Arguments [_arguments_snapshot.cleanup_repository]
+
+#### Request (object) [_request_snapshot.cleanup_repository]
+- **`repository` (string)**: Snapshot repository to clean up.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+
+## client.snapshot.clone [_snapshot.clone]
+Clone a snapshot.
+Clone part or all of a snapshot into another snapshot in the same repository.
+```ts
+client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
+```
+
+### Arguments [_arguments_snapshot.clone]
+
+#### Request (object) [_request_snapshot.clone]
+- **`repository` (string)**: A repository name
+- **`snapshot` (string)**: The name of the snapshot to clone from
+- **`target_snapshot` (string)**: The name of the cloned snapshot to create
+- **`indices` (string)**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+
+## client.snapshot.create [_snapshot.create]
+Create a snapshot.
+Take a snapshot of a cluster or of data streams and indices.
+```ts
+client.snapshot.create({ repository, snapshot })
+```
+
+### Arguments [_arguments_snapshot.create]
+
+#### Request (object) [_request_snapshot.create]
+- **`repository` (string)**: Repository for the snapshot.
+- **`snapshot` (string)**: Name of the snapshot. Must be unique in the repository.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed.
+- **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot.
The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
+- **`indices` (Optional, string \| string[])**: Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default.
+- **`feature_states` (Optional, string[])**: Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default.
+- **`metadata` (Optional, Record)**: Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch.
+- **`partial` (Optional, boolean)**: If `true`, allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.
+
+## client.snapshot.createRepository [_snapshot.create_repository]
+Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
+To register a snapshot repository, the cluster's global metadata must be writable.
+Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html)
+
+```ts
+client.snapshot.createRepository({ repository })
+```
+
+### Arguments [_arguments_snapshot.create_repository]
+
+#### Request (object) [_request_snapshot.create_repository]
+- **`repository` (string)**: A repository name
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout
+- **`verify` (Optional, boolean)**: Whether to verify the repository after creation
+
+## client.snapshot.delete [_snapshot.delete]
+Delete snapshots.
+```ts
+client.snapshot.delete({ repository, snapshot })
+```
+
+### Arguments [_arguments_snapshot.delete]
+
+#### Request (object) [_request_snapshot.delete]
+- **`repository` (string)**: A repository name
+- **`snapshot` (string)**: A list of snapshot names
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the matching snapshots are all deleted.
+If `false`, the request returns a response as soon as the deletes are scheduled. + +## client.snapshot.deleteRepository [_snapshot.delete_repository] +Delete snapshot repositories. +When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +The snapshots themselves are left untouched and in place. +```ts +client.snapshot.deleteRepository({ repository }) +``` + +### Arguments [_arguments_snapshot.delete_repository] + +#### Request (object) [_request_snapshot.delete_repository] +- **`repository` (string \| string[])**: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout + +## client.snapshot.get [_snapshot.get] +Get snapshot information. +```ts +client.snapshot.get({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.get] + +#### Request (object) [_request_snapshot.get] +- **`repository` (string)**: List of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. +- **`snapshot` (string \| string[])**: List of snapshot names to retrieve. Also accepts wildcards (*). +- To get information about all snapshots in a registered repository, use a wildcard (*) or _all. +- To get information about any snapshots that are currently running, use _current. +- **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error for any snapshots that are unavailable. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`verbose` (Optional, boolean)**: If true, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. +- **`index_details` (Optional, boolean)**: If true, returns additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to false, meaning that this information is omitted. +- **`index_names` (Optional, boolean)**: If true, returns the name of each index in each snapshot. +- **`include_repository` (Optional, boolean)**: If true, returns the repository name in each snapshot. +- **`sort` (Optional, Enum("start_time" \| "duration" \| "name" \| "index_count" \| "repository" \| "shard_count" \| "failed_shard_count"))**: Allows setting a sort order for the result. Defaults to start_time, i.e. sorting by snapshot start time stamp. +- **`size` (Optional, number)**: Maximum number of snapshots to return. Defaults to 0 which means return all that match the request without limit. +- **`order` (Optional, Enum("asc" \| "desc"))**: Sort order. Valid values are asc for ascending and desc for descending order. Defaults to asc, meaning ascending order. +- **`after` (Optional, string)**: Offset identifier to start pagination from as returned by the next field in the response body. +- **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. 
+- **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. It can be a snapshot or repository name when sorting by name, a millisecond time value when sorting by start time or duration, or a number when sorting by index count or shard count.
+- **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of SLM policy names that snapshots belong to. Also accepts wildcards (*) and combinations of wildcards followed by exclude patterns starting with -. To include snapshots not created by an SLM policy, you can use the special pattern _none, which matches all snapshots without an SLM policy.
+
+## client.snapshot.getRepository [_snapshot.get_repository]
+Get snapshot repository information.
+```ts
+client.snapshot.getRepository({ ... })
+```
+
+### Arguments [_arguments_snapshot.get_repository]
+
+#### Request (object) [_request_snapshot.get_repository]
+- **`repository` (Optional, string \| string[])**: A list of repository names
+- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+
+## client.snapshot.repositoryAnalyze [_snapshot.repository_analyze]
+Analyze a snapshot repository.
+Analyze the performance characteristics and any incorrect behaviour found in a repository.
+
+The response exposes implementation details of the analysis, which may change from version to version.
+The response body format is therefore not considered stable and may be different in newer versions.
+
+There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.
+Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.
+
+The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.
+Run your first analysis with the default parameter values to check for simple problems.
+If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`.
+Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion.
+Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.
+
+If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.
+This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.
+If so, this storage system is not suitable for use as a snapshot repository.
+You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.
+ +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. +You can use this information to determine the performance of your storage system. +If any operation fails or returns an incorrect result, the API returns an error. +If the API returns an error, it may not have removed all the data it wrote to the repository. +The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. +Some clients are configured to close their connection if no response is received within a certain timeout. +An analysis takes a long time to complete so you might need to relax any such client-side timeouts. +On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. +The path to the leftover data is recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. +The analysis attempts to detect common bugs but it does not offer 100% coverage. +Additionally, it does not test the following: + +* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. +* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. +* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. + +IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. +This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. +You must ensure this load does not affect other users of these systems. +Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. + +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. + +NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. +A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. +This indicates it behaves incorrectly in ways that the former version did not detect. +You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. + +NOTE: This API may not work correctly in a mixed-version cluster. 
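+
+As a rough sketch of the escalation described above (the `my_backup` repository name is a hypothetical placeholder):
+
+```ts
+// First pass: default parameters, to check for simple problems quickly.
+await client.snapshot.repositoryAnalyze({ repository: 'my_backup' })
+
+// Later pass: a larger analysis sized toward the recommended minimums,
+// with a generous timeout so it can run to completion.
+await client.snapshot.repositoryAnalyze({
+  repository: 'my_backup',
+  blob_count: 2000,
+  max_blob_size: '2gb',
+  max_total_data_size: '1tb',
+  register_operation_count: 100,
+  timeout: '1h'
+})
+```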
+
+*Implementation details*
+
+NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.
+
+The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter, and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter.
+These tasks are distributed over the data and master-eligible nodes in the cluster for execution.
+
+For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.
+The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters.
+If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.
+
+For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.
+These reads are permitted to fail, but must not return partial data.
+If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.
+
+For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.
+In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.
+If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.
+
+The executing node will use a variety of different methods to write the blob.
+For instance, where applicable, it will use both single-part and multi-part uploads.
+Similarly, the reading nodes will use a variety of different methods to read the data back again.
+For instance they may read the entire blob from start to end or may read only a subset of the data.
+
+For some blob-level tasks, the executing node will cancel the write before it is complete.
+In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.
+
+Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
+This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
+The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
+Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
+Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
+If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
+Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.
+Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
+```ts
+client.snapshot.repositoryAnalyze({ repository })
+```
+
+### Arguments [_arguments_snapshot.repository_analyze]
+
+#### Request (object) [_request_snapshot.repository_analyze]
+- **`repository` (string)**: The name of the repository.
+- **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test.
+For realistic experiments, you should set it to at least `2000`.
+- **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test.
+- **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis.
+If false, it returns only a summary of the analysis.
+- **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob.
+Early read operations are only rarely performed.
+- **`max_blob_size` (Optional, number \| string)**: The maximum size of a blob to be written during the test.
+For realistic experiments, you should set it to at least `2gb`.
+- **`max_total_data_size` (Optional, number \| string)**: An upper limit on the total size of all the blobs written during the test.
+For realistic experiments, you should set it to at least `1tb`.
+- **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob.
+- **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete.
+- **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing.
+- **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total.
+For realistic experiments, you should set it to at least `100`.
+- **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test.
+To repeat the same set of operations in multiple experiments, use the same seed in each experiment.
+Note that the operations are performed concurrently so might not always happen in the same order on each run.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period of time to wait for the test to complete.
+If no response is received before the timeout expires, the test is cancelled and returns an error.
+
+## client.snapshot.restore [_snapshot.restore]
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
+
+You can restore a snapshot only to a running cluster with an elected master node.
+The snapshot repository must be registered and available to the cluster.
+The snapshot and cluster versions must be compatible.
+
+To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.
+
+Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:
+
+```
+GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+```
+
+If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.
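+
+A rough client-side equivalent of the check above (`filter_path` is one of the common query parameters accepted by every API):
+
+```ts
+// List index templates, keeping only the fields needed to confirm that
+// a template with a data_stream definition matches the stream to restore.
+const body = await client.indices.getIndexTemplate({
+  filter_path: 'index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream'
+})
+console.log(body.index_templates)
+```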
+ +If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. +```ts +client.snapshot.restore({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.restore] + +#### Request (object) [_request_snapshot.restore] +- **`repository` (string)**: A repository name +- **`snapshot` (string)**: A snapshot name +- **`feature_states` (Optional, string[])** +- **`ignore_index_settings` (Optional, string[])** +- **`ignore_unavailable` (Optional, boolean)** +- **`include_aliases` (Optional, boolean)** +- **`include_global_state` (Optional, boolean)** +- **`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** +- **`indices` (Optional, string \| string[])** +- **`partial` (Optional, boolean)** +- **`rename_pattern` (Optional, string)** +- **`rename_replacement` (Optional, string)** +- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node +- **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before returning + +## client.snapshot.status [_snapshot.status] +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. +Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. +If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. + +WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. +The API requires a read from the repository for each shard in each snapshot. +For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). + +Depending on the latency of your storage, such requests can take an extremely long time to return results. +These requests can also tax machine resources and, when using cloud storage, incur high processing costs. +```ts +client.snapshot.status({ ... 
})
+```
+
+### Arguments [_arguments_snapshot.status]
+
+#### Request (object) [_request_snapshot.status]
+- **`repository` (Optional, string)**: A repository name
+- **`snapshot` (Optional, string \| string[])**: A list of snapshot names
+- **`ignore_unavailable` (Optional, boolean)**: Whether to ignore unavailable snapshots. Defaults to false, which means a SnapshotMissingException is thrown.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+
+## client.snapshot.verifyRepository [_snapshot.verify_repository]
+Verify a snapshot repository.
+Check for common misconfigurations in a snapshot repository.
+```ts
+client.snapshot.verifyRepository({ repository })
+```
+
+### Arguments [_arguments_snapshot.verify_repository]
+
+#### Request (object) [_request_snapshot.verify_repository]
+- **`repository` (string)**: A repository name
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout
+
+## client.sql.clearCursor [_sql.clear_cursor]
+Clear an SQL search cursor.
+```ts
+client.sql.clearCursor({ cursor })
+```
+
+### Arguments [_arguments_sql.clear_cursor]
+
+#### Request (object) [_request_sql.clear_cursor]
+- **`cursor` (string)**: Cursor to clear.
+
+## client.sql.deleteAsync [_sql.delete_async]
+Delete an async SQL search.
+Delete an async SQL search or a stored synchronous SQL search.
+If the search is still running, the API cancels it.
+
+If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:
+
+* Users with the `cancel_task` cluster privilege.
+* The user who first submitted the search.
+```ts
+client.sql.deleteAsync({ id })
+```
+
+### Arguments [_arguments_sql.delete_async]
+
+#### Request (object) [_request_sql.delete_async]
+- **`id` (string)**: The identifier for the search.
+
+## client.sql.getAsync [_sql.get_async]
+Get async SQL search results.
+Get the current status and available results for an async SQL search or stored synchronous SQL search.
+
+If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
+```ts
+client.sql.getAsync({ id })
+```
+
+### Arguments [_arguments_sql.get_async]
+
+#### Request (object) [_request_sql.get_async]
+- **`id` (string)**: The identifier for the search.
+- **`delimiter` (Optional, string)**: The separator for CSV results.
+The API supports this parameter only for CSV responses.
+- **`format` (Optional, string)**: The format for the response.
+You must specify a format using this parameter or the `Accept` HTTP header.
+If you specify both, the API uses this parameter.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for the search and its results.
+It defaults to the `keep_alive` period for the original SQL search.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results.
+It defaults to no timeout, meaning the request waits for complete search results.
+
+## client.sql.getAsyncStatus [_sql.get_async_status]
+Get the async SQL search status.
+Get the current status of an async SQL search or a stored synchronous SQL search.
+```ts
+client.sql.getAsyncStatus({ id })
+```
+
+### Arguments [_arguments_sql.get_async_status]
+
+#### Request (object) [_request_sql.get_async_status]
+- **`id` (string)**: The identifier for the search.
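+
+For example, a minimal sketch that polls an async SQL search and fetches its results once it has finished (the search id is an illustrative placeholder):
+
+```ts
+// Identifier previously returned by an async SQL search (illustrative value).
+const id = 'FnR0TDhy...'
+
+const status = await client.sql.getAsyncStatus({ id })
+if (!status.is_running) {
+  const results = await client.sql.getAsync({ id })
+  console.log(results.columns, results.rows)
+}
+```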
+ +## client.sql.query [_sql.query] +Get SQL search results. +Run an SQL request. +```ts +client.sql.query({ ... }) +``` + +### Arguments [_arguments_sql.query] + +#### Request (object) [_request_sql.query] +- **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +- **`catalog` (Optional, string)**: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +- **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +- **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. +- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. +- **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. +- **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for an async or saved synchronous search. +- **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +- **`page_timeout` (Optional, string \| -1 \| 0)**: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +- **`params` (Optional, User-defined value[])**: The values for parameters in the query. +- **`query` (Optional, string)**: The SQL query to run. +- **`request_timeout` (Optional, string \| -1 \| 0)**: The timeout before the request fails. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. 
+- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results.
+It defaults to no timeout, meaning the request waits for complete search results.
+If the search doesn't finish within this period, the search becomes async.
+
+To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.
+- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile"))**: The format for the response.
+You can also specify a format using the `Accept` HTTP header.
+If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.
+
+## client.sql.translate [_sql.translate]
+Translate SQL into Elasticsearch queries.
+Translate an SQL search into a search API request containing Query DSL.
+It accepts the same request body parameters as the SQL search API, excluding `cursor`.
+```ts
+client.sql.translate({ query })
+```
+
+### Arguments [_arguments_sql.translate]
+
+#### Request (object) [_request_sql.translate]
+- **`query` (string)**: The SQL query to run.
+- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response.
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering.
+- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.
+
+## client.ssl.certificates [_ssl.certificates]
+Get SSL certificates.
+
+Get information about the X.509 certificates that are used to encrypt communications in the cluster.
+The API returns a list that includes certificates from all TLS contexts, including:
+
+- Settings for transport and HTTP interfaces
+- TLS settings that are used within authentication realms
+- TLS settings for remote monitoring exporters
+
+The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
+It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
+
+The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
+
+NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
+
+If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
+```ts
+client.ssl.certificates()
+```
+
+
+## client.synonyms.deleteSynonym [_synonyms.delete_synonym]
+Delete a synonym set.
+
+You can only delete a synonyms set that is not in use by any index analyzer.
+
+Synonyms sets can be used in synonym graph token filters and synonym token filters.
+These synonym filters can be used as part of search analyzers.
+
+Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).
+Even if the analyzer is not used on any field mapping, it still needs to be loaded during the index recovery phase.
+
+If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.
+To prevent that, synonyms sets that are used in analyzers can't be deleted.
+A delete request in this case will return a 400 response code.
+
+To remove a synonyms set, you must first remove all indices that contain analyzers using it.
+You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and using the reindex API to copy over the index data.
+Once finished, you can delete the index.
+When the synonyms set is not used in analyzers, you will be able to delete it.
+```ts
+client.synonyms.deleteSynonym({ id })
+```
+
+### Arguments [_arguments_synonyms.delete_synonym]
+
+#### Request (object) [_request_synonyms.delete_synonym]
+- **`id` (string)**: The synonyms set identifier to delete.
+
+## client.synonyms.deleteSynonymRule [_synonyms.delete_synonym_rule]
+Delete a synonym rule.
+Delete a synonym rule from a synonym set.
+```ts
+client.synonyms.deleteSynonymRule({ set_id, rule_id })
+```
+
+### Arguments [_arguments_synonyms.delete_synonym_rule]
+
+#### Request (object) [_request_synonyms.delete_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to update.
+- **`rule_id` (string)**: The ID of the synonym rule to delete.
+
+## client.synonyms.getSynonym [_synonyms.get_synonym]
+Get a synonym set.
+```ts
+client.synonyms.getSynonym({ id })
+```
+
+### Arguments [_arguments_synonyms.get_synonym]
+
+#### Request (object) [_request_synonyms.get_synonym]
+- **`id` (string)**: The synonyms set identifier to retrieve.
+- **`from` (Optional, number)**: The starting offset for synonym rules to retrieve.
+- **`size` (Optional, number)**: The max number of synonym rules to retrieve.
+
+## client.synonyms.getSynonymRule [_synonyms.get_synonym_rule]
+Get a synonym rule.
+Get a synonym rule from a synonym set.
+```ts
+client.synonyms.getSynonymRule({ set_id, rule_id })
+```
+
+### Arguments [_arguments_synonyms.get_synonym_rule]
+
+#### Request (object) [_request_synonyms.get_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from.
+- **`rule_id` (string)**: The ID of the synonym rule to retrieve.
+
+## client.synonyms.getSynonymsSets [_synonyms.get_synonyms_sets]
+Get all synonym sets.
+Get a summary of all defined synonym sets.
+```ts
+client.synonyms.getSynonymsSets({ ... })
+```
+
+### Arguments [_arguments_synonyms.get_synonyms_sets]
+
+#### Request (object) [_request_synonyms.get_synonyms_sets]
+- **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve.
+- **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve.
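+
+For example, a minimal sketch that pages through the defined synonym sets, 20 at a time:
+
+```ts
+// Fetch the first page of synonym set summaries.
+const page = await client.synonyms.getSynonymsSets({ from: 0, size: 20 })
+// Each result reports the set id and how many rules it contains.
+console.log(page.count, page.results)
+```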
+ +## client.synonyms.putSynonym [_synonyms.put_synonym] +Create or update a synonym set. +Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +If you need to manage more synonym rules, you can create multiple synonym sets. + +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. +```ts +client.synonyms.putSynonym({ id, synonyms_set }) +``` + +### Arguments [_arguments_synonyms.put_synonym] + +#### Request (object) [_request_synonyms.put_synonym] +- **`id` (string)**: The ID of the synonyms set to be created or updated. +- **`synonyms_set` ({ id, synonyms } \| { id, synonyms }[])**: The synonym rules definitions for the synonyms set. + +## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule] +Create or update a synonym rule. +Create or update a synonym rule in a synonym set. + +If any of the synonym rules included is invalid, the API returns an error. + +When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. +```ts +client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) +``` + +### Arguments [_arguments_synonyms.put_synonym_rule] + +#### Request (object) [_request_synonyms.put_synonym_rule] +- **`set_id` (string)**: The ID of the synonym set. +- **`rule_id` (string)**: The ID of the synonym rule to be updated or created. +- **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. + +## client.tasks.cancel [_tasks.cancel] +Cancel a task. + +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. +It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. +The get task information API will continue to list these cancelled tasks until they complete. +The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. + +To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. +You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. +```ts +client.tasks.cancel({ ... }) +``` + +### Arguments [_arguments_tasks.cancel] + +#### Request (object) [_request_tasks.cancel] +- **`task_id` (Optional, string \| number)**: The task identifier. +- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions that is used to limit the request. +- **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. +- **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. + +## client.tasks.get [_tasks.get] +Get task information. +Get information about a task currently running in the cluster. + +WARNING: The task management API is new and should still be considered a beta feature. 
+The API may change in ways that are not backwards compatible.
+
+If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
+```ts
+client.tasks.get({ task_id })
+```
+
+### Arguments [_arguments_tasks.get]
+
+#### Request (object) [_request_tasks.get]
+- **`task_id` (string)**: The task identifier.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
+
+## client.tasks.list [_tasks.list]
+Get all tasks.
+Get information about the tasks currently running on one or more nodes in the cluster.
+
+WARNING: The task management API is new and should still be considered a beta feature.
+The API may change in ways that are not backwards compatible.
+
+**Identifying running tasks**
+
+The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as in the `headers` field in the task information.
+This enables you to track certain calls or associate certain tasks with the client that started them.
+For example:
+
+```
+curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
+```
+
+The API returns the following result:
+
+```
+HTTP/1.1 200 OK
+X-Opaque-Id: 123456
+content-type: application/json; charset=UTF-8
+content-length: 831
+
+{
+  "tasks" : {
+    "u5lcZHqcQhu-rUoFaqDphA:45" : {
+      "node" : "u5lcZHqcQhu-rUoFaqDphA",
+      "id" : 45,
+      "type" : "transport",
+      "action" : "cluster:monitor/tasks/lists",
+      "start_time_in_millis" : 1513823752749,
+      "running_time_in_nanos" : 293139,
+      "cancellable" : false,
+      "headers" : {
+        "X-Opaque-Id" : "123456"
+      },
+      "children" : [
+        {
+          "node" : "u5lcZHqcQhu-rUoFaqDphA",
+          "id" : 46,
+          "type" : "direct",
+          "action" : "cluster:monitor/tasks/lists[n]",
+          "start_time_in_millis" : 1513823752750,
+          "running_time_in_nanos" : 92133,
+          "cancellable" : false,
+          "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+          "headers" : {
+            "X-Opaque-Id" : "123456"
+          }
+        }
+      ]
+    }
+  }
+}
+```
+In this example, `X-Opaque-Id: 123456` is the ID returned as part of the response header.
+The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.
+The `X-Opaque-Id` in the children `headers` identifies the child task of the task that was initiated by the REST request.
+```ts
+client.tasks.list({ ... })
+```
+
+### Arguments [_arguments_tasks.list]
+
+#### Request (object) [_request_tasks.list]
+- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions used to limit the request.
+For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run.
+- **`group_by` (Optional, Enum("nodes" \| "parents" \| "none"))**: A key that is used to group tasks in the response.
+The task lists can be grouped either by nodes or by parent tasks.
+- **`nodes` (Optional, string \| string[])**: A list of node IDs or names that is used to limit the returned information.
+- **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information.
+To return all tasks, omit this parameter or use a value of `-1`.
+If the parent task is not found, the API does not return a 404 response code. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for each node to respond. +If a node does not respond before its timeout expires, the response does not include its information. +However, timed out nodes are included in the `node_failures` property. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. + +## client.textStructure.findFieldStructure [_text_structure.find_field_structure] +Find the structure of a text field. +Find the structure of a text field in an Elasticsearch index. + +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. +```ts +client.textStructure.findFieldStructure({ field, index }) +``` + +### Arguments [_arguments_text_structure.find_field_structure] + +#### Request (object) [_request_text_structure.find_field_structure] +- **`field` (string)**: The field that should be analyzed. +- **`index` (string)**: The name of the index that contains the analyzed field. +- **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +- **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. +The minimum value is 2. +- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. 
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output.
+The intention in that situation is that a user who knows the meanings will rename the fields before using them.
+- **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
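+The following is a minimal sketch of calling this API from the client; the `app-logs` index, the `message` field, and the response fields read at the end are illustrative assumptions, not a verbatim recipe:
+
+```ts
+// Hypothetical index and field names; adjust to whatever you have ingested.
+const structure = await client.textStructure.findFieldStructure({
+  index: 'app-logs',
+  field: 'message',
+  documents_to_sample: 1000,
+  explain: true
+})
+
+// The response includes proposed mappings and, for semi-structured text,
+// a generated Grok pattern you can reuse in an ingest pipeline.
+console.log(structure.mappings)
+console.log(structure.grok_pattern)
+```
+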
+## client.textStructure.findMessageStructure [_text_structure.find_message_structure]
+Find the structure of text messages.
+Find the structure of a list of text messages.
+The messages must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
+
+The response from the API contains:
+
+* Sample messages.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance.
+However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
+It helps determine why the returned structure was chosen.
+```ts
+client.textStructure.findMessageStructure({ messages })
+```
+
+### Arguments [_arguments_text_structure.find_message_structure]
+
+#### Request (object) [_request_text_structure.find_message_structure]
+- **`messages` (string[])**: The list of messages you want to analyze.
+- **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them.
+- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
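+As a minimal sketch, analyzing a few pre-split log lines might look like the following; the sample messages are hypothetical stand-ins for the output of your own splitting process:
+
+```ts
+const analysis = await client.textStructure.findMessageStructure({
+  messages: [
+    '[2024-01-01T12:00:00] INFO starting worker 1',
+    '[2024-01-01T12:00:01] WARN worker 1 is slow',
+    '[2024-01-01T12:00:02] INFO stopping worker 1'
+  ]
+})
+
+// For semi-structured text the response includes a generated Grok
+// pattern alongside the proposed index mappings.
+console.log(analysis.format, analysis.grok_pattern)
+console.log(analysis.mappings)
+```
+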
+## client.textStructure.findStructure [_text_structure.find_structure]
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
+It must, however, be text; binary text formats are not currently supported.
+The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 MB.
+
+The response from the API contains:
+
+* A couple of messages from the beginning of the text.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance.
+However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+```ts
+client.textStructure.findStructure({ ... })
+```
+
+### Arguments [_arguments_text_structure.find_structure]
+
+#### Request (object) [_request_text_structure.find_structure]
+- **`text_files` (Optional, TJsonDocument[])**
+- **`charset` (Optional, string)**: The text's character set.
+It must be a character set that is supported by the JVM that Elasticsearch uses.
+For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+If this parameter is not specified, the structure finder chooses an appropriate character set.
+- **`column_names` (Optional, string)**: If you have set `format` to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them.
+- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
+- **`format` (Optional, string)**: The high level structure of the text.
+Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text.
+If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
+- **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text.
+If you have extremely long messages, you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
+- **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text.
+The minimum is 2.
+If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
+
+NOTE: The number of lines and the variation of the lines affect the speed of the analysis.
+For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample.
+If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety.
+- **`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a `.`, `,`, or `:`.
+Spacing and punctuation are also permitted, with the exception of `?`, newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
+## client.textStructure.testGrokPattern [_text_structure.test_grok_pattern]
+Test a Grok pattern.
+Test a Grok pattern on one or more lines of text.
+The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
+```ts
+client.textStructure.testGrokPattern({ grok_pattern, text })
+```
+
+### Arguments [_arguments_text_structure.test_grok_pattern]
+
+#### Request (object) [_request_text_structure.test_grok_pattern]
+- **`grok_pattern` (string)**: The Grok pattern to run on the text.
+- **`text` (string[])**: The lines of text to run the Grok pattern on.
+- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+
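+Before wiring a pattern into an ingest pipeline, it can be handy to sanity-check it. A minimal sketch follows; the pattern and sample lines are hypothetical, and the response is assumed to report per-line matches with the captured fields:
+
+```ts
+const result = await client.textStructure.testGrokPattern({
+  grok_pattern: '%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:msg}',
+  text: [
+    '2024-01-01T12:00:00Z INFO worker started',
+    'this line does not match'
+  ]
+})
+
+// Each entry indicates whether the line matched and, if so, the offset
+// and length of every captured field.
+result.matches.forEach((m, i) => console.log(i, m.matched, m.fields))
+```
+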
+## client.transform.deleteTransform [_transform.delete_transform]
+Delete a transform.
+```ts
+client.transform.deleteTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.delete_transform]
+
+#### Request (object) [_request_transform.delete_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`force` (Optional, boolean)**: If this value is `false`, the transform must be stopped before it can be deleted. If `true`, the transform is
+deleted regardless of its current state.
+- **`delete_dest_index` (Optional, boolean)**: If this value is `true`, the destination index is deleted together with the transform. If `false`, the destination
+index will not be deleted.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.transform.getNodeStats [_transform.get_node_stats]
+Get transform usage information for transform nodes.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html)
+
+```ts
+client.transform.getNodeStats()
+```
+
+## client.transform.getTransform [_transform.get_transform]
+Get transforms.
+Get configuration information for transforms.
+```ts
+client.transform.getTransform({ ... })
+```
+
+### Arguments [_arguments_transform.get_transform]
+
+#### Request (object) [_request_transform.get_transform]
+- **`transform_id` (Optional, string \| string[])**: Identifier for the transform. It can be a transform identifier or a
+wildcard expression. You can get information for all transforms by using
+`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+`<transform_id>`.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no transforms that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If this parameter is `false`, the request returns a 404 status code when
+there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of transforms.
+- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
+- **`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the
+transform. This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+
+## client.transform.getTransformStats [_transform.get_transform_stats]
+Get transform stats.
+
+Get usage information for transforms.
+```ts
+client.transform.getTransformStats({ transform_id })
+```
+
+### Arguments [_arguments_transform.get_transform_stats]
+
+#### Request (object) [_request_transform.get_transform_stats]
+- **`transform_id` (string \| string[])**: Identifier for the transform. It can be a transform identifier or a
+wildcard expression. You can get information for all transforms by using
+`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+`<transform_id>`.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no transforms that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If this parameter is `false`, the request returns a 404 status code when
+there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of transforms.
+- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the stats.
+
+## client.transform.previewTransform [_transform.preview_transform]
+Preview a transform.
+Generates a preview of the results that you will get when you create a transform with the same configuration.
+
+It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also
+generates a list of mappings and settings for the destination index. These values are determined based on the field
+types of the source index and the transform aggregations.
+```ts
+client.transform.previewTransform({ ... })
+```
+
+### Arguments [_arguments_transform.preview_transform]
+
+#### Request (object) [_request_transform.preview_transform]
+- **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform
+configuration details in the request body.
+- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
+- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for +each unique key. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.putTransform [_transform.put_transform] +Create a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. +```ts +client.transform.putTransform({ transform_id, dest, source }) +``` + +### Arguments [_arguments_transform.put_transform] + +#### Request (object) [_request_transform.put_transform] +- **`transform_id` (string)**: Identifier for the transform. 
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.transform.resetTransform [_transform.reset_transform] +Reset a transform. + +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. +```ts +client.transform.resetTransform({ transform_id }) +``` + +### Arguments [_arguments_transform.reset_transform] + +#### Request (object) [_request_transform.reset_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform +must be stopped before it can be reset. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
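+
+To make the pivot workflow above concrete, here is a minimal sketch that previews a configuration and then creates it; the index names, field names, and the `orders-by-customer` transform ID are hypothetical:
+
+```ts
+// Hypothetical pivot: group orders by customer and sum their spend.
+const config = {
+  source: { index: 'orders' },
+  dest: { index: 'orders-by-customer' },
+  pivot: {
+    group_by: { customer_id: { terms: { field: 'customer_id' } } },
+    aggregations: { total_spend: { sum: { field: 'order_total' } } }
+  }
+}
+
+// Preview the destination documents and deduced mappings first...
+const preview = await client.transform.previewTransform(config)
+console.log(preview.preview.slice(0, 3))
+
+// ...then create the transform once the preview looks right.
+await client.transform.putTransform({ transform_id: 'orders-by-customer', ...config })
+```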
+
+## client.transform.scheduleNowTransform [_transform.schedule_now_transform]
+Schedule a transform to start now.
+
+Instantly run a transform to process data.
+If you run this API, the transform will process the new data instantly,
+without waiting for the configured frequency interval. After the API is called,
+the transform will be processed again at `now + frequency` unless the API
+is called again in the meantime.
+```ts
+client.transform.scheduleNowTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.schedule_now_transform]
+
+#### Request (object) [_request_transform.schedule_now_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the scheduling to take place.
+
+## client.transform.setUpgradeMode [_transform.set_upgrade_mode]
+Set `upgrade_mode` for transform indices.
+Sets a cluster-wide `upgrade_mode` setting that prepares transform
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your
+nodes and reindex your transform indices. In those circumstances,
+there must be no transforms running. You can close the transforms,
+do the upgrade, then open all the transforms again. Alternatively,
+you can use this API to temporarily halt tasks associated with the transforms
+and prevent new transforms from opening. You can also use this API
+during upgrades that do not require you to reindex your transform
+indices, though stopping transforms is not a requirement in that case.
+You can see the current value for the `upgrade_mode` setting by using the get
+transform info API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode)
+
+```ts
+client.transform.setUpgradeMode({ ... })
+```
+
+### Arguments [_arguments_transform.set_upgrade_mode]
+
+#### Request (object) [_request_transform.set_upgrade_mode]
+- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode`, which temporarily halts all
+transform tasks and prohibits new transform tasks from
+starting.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed.
+
+## client.transform.startTransform [_transform.start_transform]
+Start a transform.
+
+When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is
+set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping
+definitions for the destination index from the source indices and the transform aggregations. If fields in the
+destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),
+the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce
+mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you
+start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings
+in a pivot transform.
+
+When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you
+created the transform, they occur when you start the transform, with the exception of privilege checks. When
+Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the
+time of creation and uses those same roles.
+If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
+```ts
+client.transform.startTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.start_transform]
+
+#### Request (object) [_request_transform.start_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like `now-30d` are supported. Only applicable for continuous transforms.
+
+## client.transform.stopTransform [_transform.stop_transform]
+Stop transforms.
+Stops one or more transforms.
+```ts
+client.transform.stopTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.stop_transform]
+
+#### Request (object) [_request_transform.stop_transform]
+- **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression.
+To stop all transforms, use `_all` or `*` as the identifier.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match;
+contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there
+are only partial matches.
+
+If it is `true`, the API returns a successful acknowledgement message when there are no matches. When there are
+only partial matches, the API stops the appropriate transforms.
+
+If it is `false`, the request returns a 404 status code when there are no matches or only partial matches.
+- **`force` (Optional, boolean)**: If it is `true`, the API forcefully stops the transforms.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the
+timeout expires, the request returns a timeout exception. However, the request continues processing and
+eventually moves the transform to a `STOPPED` state.
+- **`wait_for_checkpoint` (Optional, boolean)**: If it is `true`, the transform does not completely stop until the current checkpoint is completed. If it is `false`,
+the transform stops as soon as possible.
+- **`wait_for_completion` (Optional, boolean)**: If it is `true`, the API blocks until the indexer state completely stops. If it is `false`, the API returns
+immediately and the indexer is stopped asynchronously in the background.
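+
+Continuing the hypothetical `orders-by-customer` transform from the earlier sketch, a start/stop cycle might look like this:
+
+```ts
+await client.transform.startTransform({ transform_id: 'orders-by-customer' })
+
+// Stop at the next checkpoint boundary and wait until it is fully stopped.
+await client.transform.stopTransform({
+  transform_id: 'orders-by-customer',
+  wait_for_checkpoint: true,
+  wait_for_completion: true
+})
+```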
+
+## client.transform.updateTransform [_transform.update_transform]
+Update a transform.
+Updates certain properties of a transform.
+
+All updated properties except `description` do not take effect until after the transform starts the next checkpoint,
+thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`
+privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When
+Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the
+time of update and runs with those privileges.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-update-transform)
+
+```ts
+client.transform.updateTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.update_transform]
+
+#### Request (object) [_request_transform.update_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
+- **`description` (Optional, string)**: Free text description of the transform.
+- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the
+transform is running continuously. Also determines the retry interval in
+the event of transient failures while the transform is searching or
+indexing. The minimum value is `1s` and the maximum is `1h`.
+- **`_meta` (Optional, Record)**: Defines optional transform metadata.
+- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform.
+- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings.
+- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously.
+- **`retention_policy` (Optional, { time } \| null)**: Defines a retention policy for the transform. Data that meets the defined
+criteria is deleted from the destination index.
+- **`defer_validation` (Optional, boolean)**: When `true`, deferrable validations are not run. This behavior may be
+desired if the source index does not exist until after the transform is
+created.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.transform.upgradeTransforms [_transform.upgrade_transforms]
+Upgrade all transforms.
+
+Transforms are compatible across minor versions and between supported major versions.
+However, over time, the format of transform configuration information may change.
+This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
+It also cleans up the internal data structures that store the transform state and checkpoints.
+The upgrade does not affect the source and destination indices.
+The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.
+
+If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.
+Resolve the issue, then re-run the process.
+A summary is returned when the upgrade is finished.
+
+To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster.
+You may want to perform a recent cluster backup prior to the upgrade.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-transform-upgrade-transforms)
+
+```ts
+client.transform.upgradeTransforms({ ... })
+```
+
+### Arguments [_arguments_transform.upgrade_transforms]
+
+#### Request (object) [_request_transform.upgrade_transforms]
+- **`dry_run` (Optional, boolean)**: When `true`, the request checks for updates but does not run them.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and
+returns an error.
+
+## client.watcher.ackWatch [_watcher.ack_watch]
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+
+The acknowledgement state of an action is stored in the `status.actions.<action_id>.ack.state` structure.
+
+IMPORTANT: If the specified watch is currently being executed, this API will return an error.
+The reason for this behavior is to prevent overwriting the watch status from a watch execution.
+
+Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.
+This happens when the condition of the watch is not met (the condition evaluates to `false`).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-ack-watch)
+
+```ts
+client.watcher.ackWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.ack_watch]
+
+#### Request (object) [_request_watcher.ack_watch]
+- **`watch_id` (string)**: The watch identifier.
+- **`action_id` (Optional, string \| string[])**: A list of the action identifiers to acknowledge.
+If you omit this parameter, all of the actions of the watch are acknowledged.
+
+## client.watcher.activateWatch [_watcher.activate_watch]
+Activate a watch.
+A watch can be either active or inactive.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-activate-watch)
+
+```ts
+client.watcher.activateWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.activate_watch]
+
+#### Request (object) [_request_watcher.activate_watch]
+- **`watch_id` (string)**: The watch identifier.
+
+## client.watcher.deactivateWatch [_watcher.deactivate_watch]
+Deactivate a watch.
+A watch can be either active or inactive.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-deactivate-watch)
+
+```ts
+client.watcher.deactivateWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.deactivate_watch]
+
+#### Request (object) [_request_watcher.deactivate_watch]
+- **`watch_id` (string)**: The watch identifier.
+
+## client.watcher.deleteWatch [_watcher.delete_watch]
+Delete a watch.
+When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.
+
+Deleting a watch does not delete any watch execution records related to this watch from the watch history.
+
+IMPORTANT: Deleting a watch must be done by using only this API.
+Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
+When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-delete-watch)
+
+```ts
+client.watcher.deleteWatch({ id })
+```
+
+### Arguments [_arguments_watcher.delete_watch]
+
+#### Request (object) [_request_watcher.delete_watch]
+- **`id` (string)**: The watch identifier.
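+
+As a small end-to-end illustration of the two watch lifecycle calls above, with a hypothetical watch and action ID:
+
+```ts
+// Acknowledge one noisy action so it stops firing until its condition
+// next evaluates to false...
+await client.watcher.ackWatch({
+  watch_id: 'cluster_health_watch',
+  action_id: 'email_admin'
+})
+
+// ...or remove the watch entirely once it is no longer needed.
+await client.watcher.deleteWatch({ id: 'cluster_health_watch' })
+```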
+
+## client.watcher.executeWatch [_watcher.execute_watch]
+Run a watch.
+This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
+
+For testing and debugging purposes, you also have fine-grained control over how the watch runs.
+You can run the watch without running all of its actions or alternatively by simulating them.
+You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.
+
+You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
+This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
+
+When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
+If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.
+
+When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of that of the user who stored the watch.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-execute-watch)
+
+```ts
+client.watcher.executeWatch({ ... })
+```
+
+### Arguments [_arguments_watcher.execute_watch]
+
+#### Request (object) [_request_watcher.execute_watch]
+- **`id` (Optional, string)**: The watch identifier.
+- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution.
+- **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input.
+- **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the `always` condition. This can also be specified as an HTTP parameter.
+- **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time.
+In addition, the status of the watch is updated, possibly throttling subsequent runs.
+This can also be specified as an HTTP parameter.
+- **`simulated_actions` (Optional, { actions, all, use_all })**
+- **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution.
+- **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request.
+This watch is not persisted to the index and `record_execution` cannot be set.
+- **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode.
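+
+For instance, a dry run of a hypothetical watch in simulation mode might look like the sketch below; the watch ID is assumed, and the actions are simulated rather than executed:
+
+```ts
+const result = await client.watcher.executeWatch({
+  id: 'cluster_health_watch',
+  ignore_condition: true, // force the actions to be evaluated
+  action_modes: { _all: 'simulate' }, // simulate instead of executing
+  record_execution: false // keep the dry run out of watch history
+})
+
+console.log(result.watch_record.result)
+```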
+
+## client.watcher.getSettings [_watcher.get_settings]
+Get Watcher index settings.
+Get settings for the Watcher internal index (`.watches`).
+Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-settings)
+
+```ts
+client.watcher.getSettings({ ... })
+```
+
+### Arguments [_arguments_watcher.get_settings]
+
+#### Request (object) [_request_watcher.get_settings]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.watcher.getWatch [_watcher.get_watch]
+Get a watch.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-get-watch)
+
+```ts
+client.watcher.getWatch({ id })
+```
+
+### Arguments [_arguments_watcher.get_watch]
+
+#### Request (object) [_request_watcher.get_watch]
+- **`id` (string)**: The watch identifier.
+
+## client.watcher.putWatch [_watcher.put_watch]
+Create or update a watch.
+When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.
+Typically for the `schedule` trigger, the scheduler is the trigger engine.
+
+IMPORTANT: You must use Kibana or this API to create a watch.
+Do not add a watch directly to the `.watches` index by using the Elasticsearch index API.
+If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.
+
+When you add a watch, you can also define its initial active state by setting the *active* parameter.
+
+When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
+If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-put-watch)
+
+```ts
+client.watcher.putWatch({ id })
+```
+
+### Arguments [_arguments_watcher.put_watch]
+
+#### Request (object) [_request_watcher.put_watch]
+- **`id` (string)**: The identifier for the watch.
+- **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches.
+- **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run.
+- **`input` (Optional, { chain, http, search, simple })**: The input that loads the data for the watch.
+- **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries.
+- **`throttle_period` (Optional, string \| -1 \| 0)**: The minimum time between actions being run.
+The default is 5 seconds.
+This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`.
+If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request.
+- **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the `throttle_period` parameter are specified, Watcher uses the last parameter included in the request.
+- **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions.
+- **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run.
+- **`active` (Optional, boolean)**: The initial state of the watch.
+The default value is `true`, which means the watch is active by default.
+- **`if_primary_term` (Optional, number)**: Only update the watch if the last operation that has changed the watch has the specified primary term.
+- **`if_seq_no` (Optional, number)**: Only update the watch if the last operation that has changed the watch has the specified sequence number.
+- **`version` (Optional, number)**: Explicit version number for concurrency control.
+
+## client.watcher.queryWatches [_watcher.query_watches]
+Query watches.
+Get all registered watches in a paginated manner and optionally filter watches by a query.
+
+Note that only the `_id` and `metadata.*` fields are queryable or sortable.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-query-watches)
+
+```ts
+client.watcher.queryWatches({ ... })
+```
+
+### Arguments [_arguments_watcher.query_watches]
+
+#### Request (object) [_request_watcher.query_watches]
+- **`from` (Optional, number)**: The offset from the first result to fetch.
+It must be non-negative.
+- **`size` (Optional, number)**: The number of hits to return.
+It must be non-negative.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null \| User-defined value[])**: Retrieve the next page of hits using a set of sort values from the previous page.
+
+## client.watcher.start [_watcher.start]
+Start the watch service.
+Start the Watcher service if it is not already running.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-start)
+
+```ts
+client.watcher.start({ ... })
+```
+
+### Arguments [_arguments_watcher.start]
+
+#### Request (object) [_request_watcher.start]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.watcher.stats [_watcher.stats]
+Get Watcher statistics.
+This API always returns basic metrics.
+You can retrieve more metrics by using the `metric` parameter.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stats)
+
+```ts
+client.watcher.stats({ ... })
+```
+
+### Arguments [_arguments_watcher.stats]
+
+#### Request (object) [_request_watcher.stats]
+- **`metric` (Optional, Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches") \| Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches")[])**: Defines which additional metrics are included in the response.
+- **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running.
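+
+A minimal sketch of paging through watches follows; the metadata field used in the filter is hypothetical, and per the note above, only `_id` and `metadata.*` are queryable:
+
+```ts
+const page = await client.watcher.queryWatches({
+  query: { term: { 'metadata.team': 'platform' } },
+  from: 0,
+  size: 20,
+  sort: ['_id']
+})
+
+console.log(page.count, page.watches.map(w => w._id))
+```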
+
+## client.watcher.stop [_watcher.stop]
+Stop the watch service.
+Stop the Watcher service if it is running.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-stop)
+
+```ts
+client.watcher.stop({ ... })
+```
+
+### Arguments [_arguments_watcher.stop]
+
+#### Request (object) [_request_watcher.stop]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.watcher.updateSettings [_watcher.update_settings]
+Update Watcher index settings.
+Update settings for the Watcher internal index (`.watches`).
+Only a subset of settings can be modified.
+This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`,
+`index.routing.allocation.include.*`, and `index.routing.allocation.require.*`.
+Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the
+Watcher shards must always be in the `data_content` tier.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-watcher-update-settings)
+
+```ts
+client.watcher.updateSettings({ ... })
+```
+
+### Arguments [_arguments_watcher.update_settings]
+
+#### Request (object) [_request_watcher.update_settings]
+- **`index.auto_expand_replicas` (Optional, string)**
+- **`index.number_of_replicas` (Optional, number)**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.xpack.info [_xpack.info]
+Get information.
+The information provided by the API includes:
+
+* Build information including the build number and timestamp.
+* License information about the currently installed license.
+* Feature information for the features that are currently enabled and available under the current license.
+```ts
+client.xpack.info({ ... })
+```
+
+### Arguments [_arguments_xpack.info]
+
+#### Request (object) [_request_xpack.info]
+- **`categories` (Optional, Enum("build" \| "features" \| "license")[])**: A list of the information categories to include in the response.
+For example, `build,license,features`.
+- **`accept_enterprise` (Optional, boolean)**: If this parameter is used, it must be set to `true`.
+- **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response.
+In particular, it adds descriptions and a tag line.
+
+## client.xpack.usage [_xpack.usage]
+Get usage information.
+Get information about the features that are currently enabled and available under the current license.
+The API also provides some usage statistics.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-xpack)
+
+```ts
+client.xpack.usage({ ... })
+```
+
+### Arguments [_arguments_xpack.usage]
+
+#### Request (object) [_request_xpack.usage]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
diff --git a/src/api/types.ts b/src/api/types.ts index 642269954..7840d8778 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable @typescript-eslint/array-type */ @@ -40,8 +26,11 @@ export interface BulkIndexOperation extends BulkWriteOperation { } export interface BulkOperationBase { + /** The document ID. */ _id?: Id + /** The name of the index or index alias to perform the action on. */ _index?: IndexName + /** A custom value used to route operations to a specific shard. */ routing?: Routing if_primary_term?: long if_seq_no?: SequenceNumber @@ -50,109 +39,243 @@ } export interface BulkOperationContainer { + /** Index the specified document. + * If the document exists, it replaces the document and increments the version. + * The following line must contain the source data to be indexed. */ index?: BulkIndexOperation + /** Index the specified document if it does not already exist. + * The following line must contain the source data to be indexed. */ create?: BulkCreateOperation + /** Perform a partial document update. + * The following line must contain the partial document and update options. */ update?: BulkUpdateOperation + /** Remove the specified document from the index. */ delete?: BulkDeleteOperation } export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase { + /** The name of the data stream, index, or index alias to perform bulk actions on. */ index?: IndexName + /** Whether to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean + /** If `true`, the response will include the ingest pipelines that were run for each index or create. */ list_executed_pipelines?: boolean + /** The pipeline identifier to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, wait for a refresh to make this operation visible to search. + * If `false`, do nothing with refreshes. + * Valid values: `true`, `false`, `wait_for`. */ refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or a list of fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. + * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default is `1`, which waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ require_data_stream?: boolean operations?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } }
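+
+/*
+ * A minimal usage sketch for BulkRequest (illustrative only, not part of the
+ * generated types; assumes a configured `client` and an existing `my-index`):
+ *
+ *   const result = await client.bulk({
+ *     index: 'my-index',
+ *     refresh: true,
+ *     operations: [
+ *       { index: { _id: '1' } },
+ *       { title: 'Document 1' },
+ *       { delete: { _id: '2' } }
+ *     ]
+ *   })
+ *   if (result.errors) console.log(result.items)
+ */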
export interface BulkResponse { + /** If `true`, one or more of the operations in the bulk request did not complete successfully. */ errors: boolean + /** The result of each operation in the bulk request, in the order they were submitted. */ items: Partial<Record<BulkOperationType, BulkResponseItem>>[] + /** The length of time, in milliseconds, it took to process the bulk request. */ took: long ingest_took?: long } export interface BulkResponseItem { + /** The document ID associated with the operation. */ _id?: string | null + /** The name of the index associated with the operation. + * If the operation targeted a data stream, this is the backing index into which the document was written. */ _index: string + /** The HTTP status code returned for the operation. */ status: integer failure_store?: BulkFailureStoreStatus + /** Additional information about the failed operation. + * The property is returned only for failed operations. */ error?: ErrorCause + /** The primary term assigned to the document for the operation. + * This property is returned only for successful operations. */ _primary_term?: long + /** The result of the operation. + * Successful values are `created`, `deleted`, and `updated`. */ result?: string + /** The sequence number assigned to the document for the operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Shard information for the operation. */ _shards?: ShardStatistics + /** The document version associated with the operation. + * The document version is incremented each time the document is updated. + * This property is returned only for successful actions. */ _version?: VersionNumber forced_refresh?: boolean get?: InlineGet<Record<string, any>> }
export interface BulkUpdateAction<TDocument = unknown, TPartialDocument = unknown> { + /** If true, the `result` in the response is set to `noop` when no changes to the document occur. */ detect_noop?: boolean + /** A partial update to an existing document. */ doc?: TPartialDocument + /** Set to `true` to use the contents of `doc` as the value of `upsert`. */ doc_as_upsert?: boolean + /** The script to run to update the document. */ script?: Script | string + /** Set to `true` to run the script whether or not the document exists. */ scripted_upsert?: boolean + /** If `false`, source retrieval is turned off. + * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig + /** If the document does not already exist, the contents of `upsert` are inserted as a new document. + * If the document exists, the `script` is run. */ upsert?: TDocument } export interface BulkUpdateOperation extends BulkOperationBase { + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean + /** The number of times an update should be retried in the case of a version conflict. */ retry_on_conflict?: integer } export interface BulkWriteOperation extends BulkOperationBase { + /** A map from the full name of fields to the name of dynamic templates. + * It defaults to an empty map. + * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template. + * If a field is already defined in the mapping, then this parameter won't be used. */ dynamic_templates?: Record<string, string> + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean } export interface ClearScrollRequest extends RequestBase { + /** A comma-separated list of scroll IDs to clear. + * To clear all scroll IDs, use `_all`. + * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ scroll_id?: ScrollIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { scroll_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { scroll_id?: never } } export interface ClearScrollResponse { + /** If `true`, the request succeeded. 
+ * This does not indicate whether any scrolling search requests were cleared. */ succeeded: boolean + /** The number of scrolling search requests cleared. */ num_freed: integer } export interface ClosePointInTimeRequest extends RequestBase { + /** The ID of the point-in-time. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface ClosePointInTimeResponse { + /** If `true`, all search contexts associated with the point-in-time ID were successfully closed. */ succeeded: boolean + /** The number of search contexts that were successfully closed. */ num_freed: integer } export interface CountRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The minimum `_score` value that documents must have to be included in the result. */ min_score?: double + /** The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. 
+ * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */ q?: string + /** Defines the search query using Query DSL. A request body query cannot be used + * with the `q` query string parameter. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } } export interface CountResponse { @@ -161,136 +284,343 @@ } export interface CreateRequest<TDocument = unknown> extends RequestBase { + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST /<target>/_doc/` request format. */ id: Id + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn't match a data stream template, this request creates the index. */ index: IndexName + /** Whether to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** If `true`, the destination must be an index alias. */ require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ require_data_stream?: boolean + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * Elasticsearch waits for at least the specified timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. + * + * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. 
+ * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. + * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The explicit version number for concurrency control. + * It must be a non-negative long number. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards document?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { + /** A unique identifier for the document. */ id: Id + /** The name of the target index. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to wait for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. + * By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ timeout?: Duration + /** An explicit version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The minimum number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. 
*/ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } } export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Skips the specified number of documents. */ from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. + * This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. + * Unlike the delete API, it does not support `wait_for`. */ refresh?: boolean + /** If `true`, the request cache is used for this request. + * Defaults to the index-level setting. */ request_cache?: boolean + /** The throttle for this request in sub-requests per second. 
*/ requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** A query in the Lucene query string syntax. */ q?: string + /** The period to retain the search context for scrolling. */ scroll?: Duration + /** The size of the scroll request that powers the operation. */ scroll_size?: long + /** The explicit timeout for each search request. + * It defaults to no timeout. */ search_timeout?: Duration + /** The type of the search operation. + * Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType + /** The number of slices this task should be divided into. */ slices?: Slices - sort?: string[] + /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The period each deletion request waits for active shards. */ timeout?: Duration + /** If `true`, returns the document version as part of a hit. */ version?: boolean + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` value controls how long each write request waits for unavailable shards to become available. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ wait_for_completion?: boolean + /** The maximum number of documents to delete. */ max_docs?: long + /** The documents to delete specified with Query DSL. */ query?: QueryDslQueryContainer + /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** A sort object that specifies the order of deleted documents. */ + sort?: Sort + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } + /** All values in `querystring` will be added to the request querystring. 
+ querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } } export interface DeleteByQueryResponse { + /** The number of scroll responses pulled back by the delete by query. */ batches?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** An array of failures if there were any unrecoverable errors during the process. + * If this array is not empty, the request ended abnormally because of those failures. + * Delete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the delete by query from ending on version conflicts. */ failures?: BulkIndexByScrollFailure[] + /** This field is always equal to zero for delete by query. + * It exists only so that delete by query, update by query, and reindex APIs return responses with the same structure. */ noops?: long + /** The number of requests per second effectively run during the delete by query. */ requests_per_second?: float + /** The number of retries attempted by delete by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ retries?: Retries slice_id?: integer task?: TaskId throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue<UnitMillis> throttled_until?: Duration + /** This field should always be equal to zero in a `_delete_by_query` response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue<UnitMillis> + /** If `true`, some requests run during the delete by query operation timed out. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue<UnitMillis> + /** The number of documents that were successfully processed. */ total?: long + /** The number of version conflicts that the delete by query hit. */ version_conflicts?: long } export interface DeleteByQueryRethrottleRequest extends RequestBase { + /** The ID for the task. */ task_id: TaskId + /** The throttle for this request in sub-requests per second. + * To disable throttling, set it to `-1`. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { + /** The identifier for the stored script or search template. 
*/ id: Id + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { + /** A unique document identifier. */ id: Id + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. */ stored_fields?: Fields + /** Explicit version number for concurrency control. + * The specified version must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } } export type ExistsResponse = boolean export interface ExistsSourceRequest extends RequestBase { + /** A unique identifier for the document. */ id: Id + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude in the response. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } } export type ExistsSourceResponse = boolean @@ -308,21 +638,52 @@ export interface ExplainExplanationDetail { } export interface ExplainRequest extends RequestBase { + /** The document identifier. */ id: Id + /** Index names that are used to limit the request. + * Only a single index name can be provided to this parameter. */ index: IndexName + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. 
*/ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or a list of fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return in the response. */ stored_fields?: Fields + /** The query in the Lucene query string syntax. */ q?: string + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } } export interface ExplainResponse<TDocument = unknown> { @@ -334,73 +695,171 @@ } export interface FieldCapsFieldCapability { + /** Whether this field can be aggregated on all indices. */ aggregatable: boolean + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ indices?: Indices + /** Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. */ meta?: Metadata + /** The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */ non_aggregatable_indices?: Indices + /** The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */ non_searchable_indices?: Indices + /** Whether this field is indexed for search on all indices. */ searchable: boolean type: string + /** Whether this field is registered as a metadata field. */ metadata_field?: boolean + /** Whether this field is used as a time series dimension. + * @experimental */ time_series_dimension?: boolean + /** Contains the metric type if this field is used as a time series metric, absent if the field is not used as a metric. + * @experimental */ time_series_metric?: MappingTimeSeriesMetricType + /** If this list is present in the response, then some indices have the + * field marked as a dimension and other indices, the ones in this list, do not. + * @experimental */ non_dimension_indices?: IndexName[] + /** The list of indices where this field is present if these indices + * don't have the same `time_series_metric` value for this field. + * @experimental */ metric_conflicts_indices?: IndexName[] }
export interface FieldCapsRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, + * or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** If true, unmapped fields are included in the response. */ include_unmapped?: boolean + /** A comma-separated list of filters to apply to the response. */ filters?: string + /** A comma-separated list of field types to include. + * Any fields that do not match one of these types will be excluded from the results. + * It defaults to empty, meaning that all field types are returned. */ types?: string[] + /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean + /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ fields?: Fields + /** Filter indices if the provided query rewrites to `match_none` on every shard. + * + * IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. + * For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. + * However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ index_filter?: QueryDslQueryContainer + /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. + * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } } export interface FieldCapsResponse { + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ indices: Indices fields: Record<string, Record<string, FieldCapsFieldCapability>> }
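+
+/*
+ * Illustrative sketch of how FieldCapsRequest and FieldCapsResponse fit
+ * together (not part of the generated types; assumes a configured `client`
+ * and an existing `my-index`):
+ *
+ *   const caps = await client.fieldCaps({ index: 'my-index', fields: ['price'] })
+ *   for (const [field, types] of Object.entries(caps.fields)) {
+ *     for (const [type, capability] of Object.entries(types)) {
+ *       console.log(field, type, capability.searchable, capability.aggregatable)
+ *     }
+ *   }
+ */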
export interface GetGetResult<TDocument = unknown> { + /** The name of the index the document belongs to. */ _index: IndexName + /** If the `stored_fields` parameter is set to `true` and `found` is `true`, it contains the document fields stored in the index. */ fields?: Record<string, any> _ignored?: string[] + /** Indicates whether the document exists. */ found: boolean + /** The unique identifier for the document. */ _id: Id + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The explicit routing, if set. */ _routing?: string + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** If `found` is `true`, it contains the document data formatted in JSON. + * If the `_source` parameter is set to `false` or the `stored_fields` parameter is set to `true`, it is excluded. */ _source?: TDocument + /** The document version, which is incremented each time the document is updated. */ _version?: VersionNumber }
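+
+/*
+ * Illustrative sketch of consuming GetGetResult via the get API (not part of
+ * the generated types; `MyDoc` is a hypothetical document type):
+ *
+ *   interface MyDoc { title: string }
+ *   const doc = await client.get<MyDoc>({ index: 'my-index', id: '1' })
+ *   if (doc.found) console.log(doc._source?.title)
+ */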
export interface GetRequest extends RequestBase { + /** A unique document identifier. */ id: Id + /** The name of the index that contains the document. */ index: IndexName + /** Indicates whether the request forces synthetic `_source`. + * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. + * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. + * Only leaf fields can be retrieved with the `stored_fields` option. + * Object fields can't be returned; if specified, the request fails. */ stored_fields?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } } export type GetResponse<TDocument = unknown> = GetGetResult<TDocument> export interface GetScriptRequest extends RequestBase { + /** The identifier for the stored script or search template. */ id: Id + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never } } export interface GetScriptResponse { @@ -426,6 +885,10 @@ export interface GetScriptContextContextMethodParam { } export interface GetScriptContextRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface GetScriptContextResponse { @@ -438,6 +901,10 @@ export interface GetScriptLanguagesLanguageContext { } export interface GetScriptLanguagesRequest extends RequestBase { + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface GetScriptLanguagesResponse { @@ -446,17 +913,35 @@ } export interface GetSourceRequest extends RequestBase { + /** A unique document identifier. */ id: Id + /** The name of the index that contains the document. */ index: IndexName + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude in the response. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } } export type GetSourceResponse<TDocument = unknown> = TDocument @@ -576,10 +1061,18 @@ export interface HealthReportRepositoryIntegrityIndicatorDetails { } export interface HealthReportRequest extends RequestBase { + /** A feature of the cluster, as returned by the top-level health report API. */ feature?: string | string[] + /** Explicit operation timeout. */ timeout?: Duration + /** Opt-in for more information about the health of the system. */ verbose?: boolean + /** Limit the number of affected resources the health report API returns. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never } } export interface HealthReportResponse { @@ -641,61 +1134,144 @@ export interface HealthReportStagnatingBackingIndices { }
export interface IndexRequest<TDocument = unknown> extends RequestBase { + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter. */ id?: Id + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn't match a data stream template, this request creates the index. + * You can check for existing targets with the resolve index API. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** Whether to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean + /** Set to `create` to only index the document if it does not already exist (put if absent). + * If a document with the specified `_id` already exists, the indexing operation will fail. + * The behavior is the same as using the `/_create` endpoint. + * If a document ID is specified, this parameter defaults to `index`. + * Otherwise, it defaults to `create`. + * If the request targets a data stream, an `op_type` of `create` is required. */ op_type?: OpType + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. + * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** An explicit version number for concurrency control. + * It must be a non-negative long number. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the destination must be an index alias. */ require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ require_data_stream?: boolean document?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } } export type IndexResponse = WriteResponseBase
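+
+/*
+ * Illustrative sketch for IndexRequest (not part of the generated types;
+ * assumes a configured `client` and an existing `my-index`):
+ *
+ *   const res = await client.index({
+ *     index: 'my-index',
+ *     id: '1',
+ *     document: { title: 'Document 1' }
+ *   })
+ *   console.log(res.result) // e.g. 'created' or 'updated'
+ */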
*/ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never } } export type IndexResponse = WriteResponseBase export interface InfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface InfoResponse { + /** The responding cluster's name. */ cluster_name: Name cluster_uuid: Uuid + /** The responding node's name. */ name: Name tagline: string + /** The running version of Elasticsearch. */ version: ElasticsearchVersionInfo } export interface KnnSearchRequest extends RequestBase { + /** A comma-separated list of index names to search; + * use `_all` or empty string to perform the operation on all indices. */ index: Indices + /** A comma-separated list of specific routing values. */ routing?: Routing + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the `hits._source` property of the search response. */ _source?: SearchSourceConfig + /** The request returns doc values for field names matching these patterns + * in the `hits.fields` property of the response. + * It accepts wildcard (`*`) patterns. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** A list of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the `_source` + * parameter defaults to `false`. You can pass `_source: true` to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields + /** The request returns values for field names matching these patterns + * in the `hits.fields` property of the response. + * It accepts wildcard (`*`) patterns. */ fields?: Fields + /** A query to filter the documents that can match. The kNN search will return the top + * `k` documents that also match this filter. The value can be a single query or a + * list of queries. If `filter` isn't provided, all documents are allowed to match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The kNN query to run. */ knn: KnnSearchQuery + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } + /** All values in `querystring` will be added to the request querystring.
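+ *
+ * An illustrative call for this request type (assumes a configured `client`; index name and vector values are hypothetical):
+ *
+ * ```ts
+ * const result = await client.knnSearch({
+ *   index: 'my-index',
+ *   knn: { field: 'embedding', query_vector: [0.1, 0.2, 0.3], k: 10, num_candidates: 100 }
+ * })
+ * ```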
*/ + querystring?: { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } } export interface KnnSearchResponse { + /** The milliseconds it took Elasticsearch to run the request. */ took: long + /** If true, the request timed out before completion; + * returned results may be partial or empty. */ timed_out: boolean + /** A count of shards used for the request. */ _shards: ShardStatistics + /** The returned documents and metadata. */ hits: SearchHitsMetadata + /** The field values for the documents. These fields + * must be specified in the request using the `fields` parameter. */ fields?: Record + /** The highest returned document score. This value is null for requests + * that do not sort by score. */ max_score?: double } export interface KnnSearchQuery { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector: QueryVector + /** The final number of nearest neighbors to return as top hits */ k: integer + /** The number of nearest neighbor candidates to consider per shard */ num_candidates: integer } @@ -706,31 +1282,60 @@ export interface MgetMultiGetError { } export interface MgetOperation { + /** The unique document ID. */ _id: Id + /** The index that contains the document. */ _index?: IndexName + /** The key for the primary shard the document resides on. Required if routing is used during indexing. */ routing?: Routing + /** If `false`, excludes all _source fields. */ _source?: SearchSourceConfig + /** The stored fields you want to retrieve. */ stored_fields?: Fields version?: VersionNumber version_type?: VersionType } export interface MgetRequest extends RequestBase { + /** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */ index?: IndexName + /** Should this request force synthetic _source? + * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. + * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean + /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** If `true`, the request refreshes relevant shards before retrieving documents. */ refresh?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** Whether to return the `_source` field (`true` or `false`), or a list of fields to return. */ _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */ stored_fields?: Fields + /** The documents you want to retrieve. Required if no index is specified in the request URI.
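+ *
+ * For example (sketch only; assumes a configured `client`, hypothetical indices and IDs):
+ *
+ * ```ts
+ * const { docs } = await client.mget({
+ *   docs: [
+ *     { _index: 'my-index', _id: '1' },
+ *     { _index: 'other-index', _id: '2' }
+ *   ]
+ * })
+ * ```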
*/ docs?: MgetOperation[] + /** The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. */ ids?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never } } export interface MgetResponse { + /** The response includes a docs array that contains the documents in the order specified in the request. + * The structure of the returned documents is similar to that returned by the get API. + * If there is a failure getting a particular document, the error is included in place of the document. */ docs: MgetResponseItem[] } @@ -747,35 +1352,81 @@ export interface MsearchMultiSearchResult + /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer + /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight + /** Boosts the _score of documents from specified indices. */ indices_boost?: Partial<Record<IndexName, double>>[] + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer sort?: Sort + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig + /** Array of wildcard (*) patterns.
The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits + /** If true, returns document version as part of a hit. */ version?: boolean + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference suggest?: SearchSuggester } @@ -795,21 +1446,44 @@ export interface MsearchMultisearchHeader { } export interface MsearchRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and index aliases to search. */ index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Indicates whether hit.matched_queries should be rendered as a map that includes + * the name of the matched query associated with its score (true) + * or as an array containing the name of the matched queries (false). + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request.
+ * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean + /** Maximum number of concurrent searches the multi search API can execute. */ max_concurrent_searches?: long + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ max_concurrent_shard_requests?: long + /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method, i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long + /** If true, hits.total is returned as an integer in the response. Defaults to false, which returns an object. */ rest_total_hits_as_int?: boolean + /** Custom routing value used to route search operations to a specific shard. */ routing?: Routing + /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ search_type?: SearchType + /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean searches?: MsearchRequestItem[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } } export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody @@ -819,13 +1493,26 @@ export type MsearchResponse = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*`. */ index?: Indices + /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** The maximum number of concurrent searches the API can run. */ max_concurrent_searches?: long + /** The type of the search operation. */ search_type?: SearchType + /** If `true`, the response returns `hits.total` as an integer. + * If `false`, it returns `hits.total` as an object. */ rest_total_hits_as_int?: boolean + /** If `true`, the response prefixes aggregation and suggester names with their respective types.
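+ *
+ * A hedged usage sketch (assumes a configured `client` and a hypothetical stored template `my-template`):
+ *
+ * ```ts
+ * const result = await client.msearchTemplate({
+ *   typed_keys: true,
+ *   search_templates: [
+ *     { index: 'my-index' },
+ *     { id: 'my-template', params: { query_string: 'hello' } }
+ *   ]
+ * })
+ * ```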
*/ typed_keys?: boolean search_templates?: MsearchTemplateRequestItem[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never } } export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig @@ -833,44 +1520,88 @@ export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTempl export type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations> export interface MsearchTemplateTemplateConfig { + /** If `true`, returns detailed information about score calculation as part of each hit. */ explain?: boolean + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record + /** If `true`, the query execution is profiled. */ profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ source?: string } export interface MtermvectorsOperation { + /** The ID of the document. */ _id?: Id + /** The index of the document. */ _index?: IndexName + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: any + /** Comma-separated list or wildcard expressions of fields to include in the statistics. + * Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean + /** Filter terms based on their tf-idf scores. */ filter?: TermvectorsFilter + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** Specific version type. */ version_type?: VersionType } export interface MtermvectorsRequest extends RequestBase { + /** The name of the index that contains the documents. */ index?: IndexName + /** A comma-separated list or wildcard expressions of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
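+ *
+ * For example (illustrative; assumes a configured `client` and a hypothetical `text` field):
+ *
+ * ```ts
+ * const { docs } = await client.mtermvectors({
+ *   index: 'my-index',
+ *   ids: ['1', '2'],
+ *   fields: ['text'],
+ *   field_statistics: true,
+ *   term_statistics: true
+ * })
+ * ```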
*/ field_statistics?: boolean + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** An array of existing or artificial documents. */ docs?: MtermvectorsOperation[] + /** A simplified syntax to specify documents by their ID if they're in the same index. */ ids?: Id[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never } } export interface MtermvectorsResponse { @@ -888,40 +1619,81 @@ export interface MtermvectorsTermVectorsResult { } export interface OpenPointInTimeRequest extends RequestBase { + /** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ index: Indices + /** Extend the length of time that the point in time persists. */ keep_alive: Duration + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. + * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. + * If `true`, the point in time will contain all the shards that are available at the time of the request. */ allow_partial_search_results?: boolean + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ max_concurrent_shard_requests?: integer + /** Filter indices if the provided query rewrites to `match_none` on every shard. */ index_filter?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. 
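+ *
+ * A typical flow, sketched (assumes a configured `client`; names are hypothetical):
+ *
+ * ```ts
+ * const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
+ * const page = await client.search({ pit: { id: pit.id, keep_alive: '1m' }, size: 100 })
+ * await client.closePointInTime({ id: pit.id })
+ * ```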
*/ + body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } } export interface OpenPointInTimeResponse { + /** Shards used to create the PIT */ _shards: ShardStatistics id: Id } export interface PingRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { + /** The identifier for the stored script or search template. + * It must be unique within the cluster. */ id: Id + /** The context in which the script or search template should run. + * To prevent errors, the API immediately compiles the script or template in this context. */ context?: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration + /** The script or search template, its parameters, and its language. */ script: StoredScript + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never } } export type PutScriptResponse = AcknowledgedResponseBase export interface RankEvalDocumentRating { + /** The document ID. */ _id: Id + /** The document’s index. For data streams, this should be the document’s backing index. */ _index: IndexName + /** The document’s relevance with regard to this search request. */ rating: integer } @@ -945,21 +1717,28 @@ export interface RankEvalRankEvalMetric { } export interface RankEvalRankEvalMetricBase { + /** Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */ k?: integer } export interface RankEvalRankEvalMetricDetail { + /** The metric_score in the details section shows the contribution of this query to the global quality metric score */ metric_score: double + /** The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. 
This can be used to ask the user to supply ratings for these documents */ unrated_docs: RankEvalUnratedDocument[] + /** The hits section shows a grouping of the search results with their supplied ratings */ hits: RankEvalRankEvalHitItem[] + /** The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results */ metric_details: Record<string, Record<string, any>> } export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { + /** If set to true, this metric will calculate the Normalized DCG. */ normalize?: boolean } export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { + /** The highest relevance grade used in the user-supplied relevance judgments. */ maximum_relevance: integer } @@ -967,10 +1746,12 @@ export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEv } export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { + /** Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and neither count as relevant nor irrelevant. Set to false (the default), they are treated as irrelevant. */ ignore_unlabeled?: boolean } export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { + /** Sets the rating threshold above which documents are considered to be "relevant". */ relevant_rating_threshold?: integer } @@ -983,25 +1764,45 @@ export interface RankEvalRankEvalQuery { } export interface RankEvalRankEvalRequestItem { + /** The search request’s ID, used to group result details later. */ id: Id + /** The query being evaluated. */ request?: RankEvalRankEvalQuery | QueryDslQueryContainer + /** List of document ratings */ ratings: RankEvalDocumentRating[] + /** The search template Id */ template_id?: Id + /** The search template parameters. */ params?: Record } export interface RankEvalRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard (`*`) expressions are supported. + * To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Search operation type */ search_type?: string + /** A set of typical search requests, together with their provided ratings. */ requests: RankEvalRankEvalRequestItem[] + /** Definition of the evaluation metric to calculate. */ metric?: RankEvalRankEvalMetric + /** All values in `body` will be added to the request body.
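+ *
+ * An illustrative request (assumes a configured `client`; the index and ratings are hypothetical):
+ *
+ * ```ts
+ * const evaluation = await client.rankEval({
+ *   index: 'my-index',
+ *   requests: [{
+ *     id: 'query_1',
+ *     request: { query: { match: { title: 'hello' } } },
+ *     ratings: [{ _index: 'my-index', _id: '1', rating: 1 }]
+ *   }],
+ *   metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
+ * })
+ * console.log(evaluation.metric_score)
+ * ```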
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never } } export interface RankEvalResponse { + /** The overall evaluation quality calculated by the defined metric */ metric_score: double + /** The details section contains one entry for every query in the original requests section, keyed by the search request id */ details: Record failures: Record } @@ -1012,65 +1813,152 @@ export interface RankEvalUnratedDocument { } export interface ReindexDestination { + /** The name of the data stream, index, or index alias you are copying to. */ index: IndexName + /** If it is `create`, the operation will only index documents that do not already exist (also known as "put if absent"). + * + * IMPORTANT: To reindex to a data stream destination, this argument must be `create`. */ op_type?: OpType + /** The name of the pipeline to use. */ pipeline?: string + /** By default, a document's routing is preserved unless it's changed by the script. + * If it is `keep`, the routing on the bulk request sent for each match is set to the routing on the match. + * If it is `discard`, the routing on the bulk request sent for each match is set to `null`. + * If it is `=value`, the routing on the bulk request sent for each match is set to the value specified after the equals sign (`=`). */ routing?: Routing + /** The versioning to use for the indexing operation. */ version_type?: VersionType } export interface ReindexRemoteSource { + /** The remote connection timeout. */ connect_timeout?: Duration + /** An object containing the headers of the request. */ headers?: Record + /** The URL for the remote instance of Elasticsearch that you want to index from. + * This information is required when you're indexing from remote. */ host: Host + /** The username to use for authentication with the remote host. */ username?: Username + /** The password to use for authentication with the remote host. */ password?: Password + /** The remote socket read timeout. */ socket_timeout?: Duration } export interface ReindexRequest extends RequestBase { + /** If `true`, the request refreshes affected shards to make this operation visible to search. */ refresh?: boolean + /** The throttle for this request in sub-requests per second. + * By default, there is no throttle. */ requests_per_second?: float + /** The period of time that a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration + /** The number of slices this task should be divided into. + * It defaults to one slice, which means the task isn't sliced into subtasks. + * + * Reindex supports sliced scroll to parallelize the reindexing process. + * This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + * + * NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + * + * If set to `auto`, Elasticsearch chooses the number of slices to use. + * This setting will use one slice per shard, up to a certain limit. + * If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
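+ *
+ * For example, an automatically sliced reindex run as a background task (sketch only; assumes a configured `client` and hypothetical index names):
+ *
+ * ```ts
+ * const task = await client.reindex({
+ *   source: { index: 'old-index' },
+ *   dest: { index: 'new-index' },
+ *   slices: 'auto',
+ *   wait_for_completion: false
+ * })
+ * ```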
*/ slices?: Slices + /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. + * By default, Elasticsearch waits for at least one minute before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value is one, which means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean + /** If `true`, the destination must be an index alias. */ require_alias?: boolean + /** Indicates whether to continue reindexing even when there are conflicts. */ conflicts?: Conflicts + /** The destination you are copying to. */ dest: ReindexDestination + /** The maximum number of documents to reindex. + * By default, all documents are reindexed. + * If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. + * + * If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ max_docs?: long + /** The script to run to update the document source or metadata when reindexing. */ script?: Script | string size?: long + /** The source you are copying from. */ source: ReindexSource + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, size?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, size?: never, source?: never } } export interface ReindexResponse { + /** The number of scroll responses that were pulled back by the reindex. */ batches?: long + /** The number of documents that were successfully created. */ created?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** If there were any unrecoverable errors during the process, it is an array of those failures. + * If this array is not empty, the request ended because of those failures. + * Reindex is implemented using batches and any failure causes the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the reindex from ending on version conflicts. */ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops?: long + /** The number of retries attempted by reindex.
*/ retries?: Retries + /** The number of requests per second effectively run during the reindex. */ requests_per_second?: float slice_id?: integer task?: TaskId + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: EpochTime + /** This field should always be equal to zero in a reindex response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) that a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: EpochTime + /** If any of the requests that ran during the reindex timed out, it is `true`. */ timed_out?: boolean + /** The total milliseconds the entire operation took. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. + * That is to say, a document with the same ID already existed before the reindex updated it. */ updated?: long + /** The number of version conflicts that occurred. */ version_conflicts?: long } export interface ReindexSource { + /** The name of the data stream, index, or alias you are copying from. + * It accepts a comma-separated list to reindex from multiple sources. */ index: Indices + /** The documents to reindex, which is defined with Query DSL. */ query?: QueryDslQueryContainer + /** A remote instance of Elasticsearch that you want to index from. */ remote?: ReindexRemoteSource + /** The number of documents to index per batch. + * Use it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */ size?: integer + /** Slice the reindex request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** A comma-separated list of `<field>:<direction>` pairs to sort by before indexing. + * Use it in conjunction with `max_docs` to control what documents are reindexed. + * + * WARNING: Sort in reindex is deprecated. + * Sorting in reindex was never guaranteed to index documents in order and prevents further development of reindex such as resilience and performance improvements. + * If used in combination with `max_docs`, consider using a query filter instead. */ sort?: Sort + /** If `true`, reindex all source fields. + * Set it to a list to reindex select fields. */ _source?: Fields runtime_mappings?: MappingRuntimeFields } @@ -1080,18 +1968,30 @@ export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { } export interface ReindexRethrottleReindexStatus { + /** The number of scroll responses pulled back by the reindex. */ batches: long + /** The number of documents that were successfully created. */ created: long + /** The number of documents that were successfully deleted. */ deleted: long + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops: long + /** The number of requests per second effectively executed during the reindex. */ requests_per_second: float + /** The number of retries attempted by reindex. `bulk` is the number of bulk actions retried and `search` is the number of search actions retried. */ retries: Retries throttled?: Duration + /** Number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in a `_reindex` response.
+ * It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */ throttled_until_millis: DurationValue + /** The number of documents that were successfully processed. */ total: long + /** The number of documents that were successfully updated, for example, a document with the same ID already existed prior to reindex updating it. */ updated: long + /** The number of version conflicts that reindex hits. */ version_conflicts: long } @@ -1109,8 +2009,15 @@ export interface ReindexRethrottleReindexTask { } export interface ReindexRethrottleRequest extends RequestBase { + /** The task identifier, which can be found by using the tasks API. */ task_id: Id + /** The throttle for this request in sub-requests per second. + * It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export interface ReindexRethrottleResponse { @@ -1118,10 +2025,23 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { + /** The ID of the search template to render. + * If no `source` is specified, this or the `id` request body parameter is required. */ id?: Id file?: string + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record + /** An inline search template. + * It supports the same parameters as the search API's request body. + * These parameters also support Mustache variables. + * If no `id` or `<template-id>` is specified, this parameter is required. */ source?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } } export interface RenderSearchTemplateResponse { @@ -1131,15 +2051,33 @@ export interface RenderSearchTemplateResponse { export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' export interface ScriptsPainlessExecutePainlessContextSetup { + /** Document that's temporarily indexed in-memory and accessible from the script. */ document: any + /** Index containing a mapping that's compatible with the indexed document. + * You may specify a remote index by prefixing the index with the remote cluster alias. + * For example, `remote1:my_index` indicates that you want to run the painless script against the "my_index" index on the "remote1" cluster. + * This request will be forwarded to the "remote1" cluster if you have configured a connection to that remote cluster. + * + * NOTE: Wildcards are not accepted in the index expression for this endpoint.
+ * The expression `*:myindex` will return the error "No such remote cluster" and the expression `logs*` or `remote1:logs*` will return the error "index not found". */ index: IndexName + /** Use this parameter to specify a query for computing a score. */ query?: QueryDslQueryContainer } export interface ScriptsPainlessExecuteRequest extends RequestBase { + /** The context that the script should run in. + * NOTE: Result ordering in the field contexts is not guaranteed. */ context?: ScriptsPainlessExecutePainlessContext + /** Additional parameters for the `context`. + * NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */ context_setup?: ScriptsPainlessExecutePainlessContextSetup + /** The Painless script to run. */ script?: Script | string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { context?: never, context_setup?: never, script?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { context?: never, context_setup?: never, script?: never } } export interface ScriptsPainlessExecuteResponse { @@ -1147,90 +2085,275 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { + /** The scroll ID */ scroll_id?: ScrollId + /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */ rest_total_hits_as_int?: boolean + /** The period to retain the search context for scrolling. */ scroll?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never } } export type ScrollResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations> export interface SearchRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. + * If `false`, it returns an error with no partial results. + * + * To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ allow_partial_search_results?: boolean + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The number of shard results that should be reduced at once on the coordinating node.
+ * If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ batched_reduce_size?: long + /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */ ccs_minimize_roundtrips?: boolean + /** The default operator for the query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the response includes the score contribution from any named queries. + * + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request. + * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The number of concurrent shard requests per node that the search runs concurrently. + * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ max_concurrent_shard_requests?: long + /** The minimum version of the node that can handle the request. + * Any handling node with a lower version will fail the request. */ min_compatible_shard_node?: VersionString + /** The nodes and shards used for the search. + * By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. + * Valid values are: + * + * * `_only_local` to run the search only on shards on the local node; + * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method; + * * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method; + * * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs, or if not, select shards using the default method; + * * `_shards:<shard>,<shard>` to run the search only on the specified shards; + * * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
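+ *
+ * For example, pinning a user's pagination requests to the same shards (sketch; assumes a configured `client`):
+ *
+ * ```ts
+ * const page = await client.search({
+ *   index: 'my-index',
+ *   preference: 'session-abc123', // any string not starting with `_`
+ *   query: { match: { title: 'hello' } }
+ * })
+ * ```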
*/ preference?: string + /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. + * This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). + * When unspecified, the pre-filter phase is executed if any of these conditions is met: + * + * * The request targets more than 128 shards. + * * The request targets one or more read-only indices. + * * The primary sort of the query targets an indexed field. */ pre_filter_shard_size?: long + /** If `true`, the caching of search results is enabled for requests where `size` is `0`. + * It defaults to index level settings. */ request_cache?: boolean + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** The period to retain the search context for scrolling. + * By default, this value cannot exceed `1d` (24 hours). + * You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ scroll?: Duration + /** Indicates how distributed term frequencies are calculated for relevance scoring. */ search_type?: SearchType + /** The field to use for suggestions. */ suggest_field?: Field + /** The suggest mode. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_mode?: SuggestMode + /** The number of suggestions to return. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_size?: long + /** The source text for which the suggestions should be returned. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_text?: string + /** If `true`, aggregation and suggester names are prefixed by their respective types in the response. */ typed_keys?: boolean + /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */ rest_total_hits_as_int?: boolean + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields + /** A query in the Lucene query string syntax. + * Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. + * + * IMPORTANT: This parameter overrides the query parameter in the request body. + * If both parameters are specified, documents matching the query request body parameter are not returned. */ q?: string + /** Should this request force synthetic _source? + * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. + * Fetches with this enabled will be slower than enabling synthetic source natively in the index.
*/ force_synthetic_source?: boolean + /** Defines the aggregations that are run as part of the search request. */ aggregations?: Record - /** @alias aggregations */ + /** Defines the aggregations that are run as part of the search request. + * @alias aggregations */ aggs?: Record + /** Collapses search results by the values of the specified field. */ collapse?: SearchFieldCollapse + /** If `true`, the request returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** The starting document offset, which must be non-negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */ highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. + * If `true`, the exact number of hits is returned at the cost of some performance. + * If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits + /** Boost the `_score` of documents from specified indices. + * The boost value is the factor by which scores are multiplied. + * A boost value greater than `1.0` increases the score. + * A boost value between `0` and `1.0` decreases the score. */ indices_boost?: Partial<Record<IndexName, double>>[] + /** An array of wildcard (`*`) field patterns. + * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** The approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] + /** The Reciprocal Rank Fusion (RRF) to use. + * @remarks This property is not supported on Elastic Cloud Serverless. */ rank?: RankContainer + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results and results collected by aggregations. */ min_score?: double + /** Use the `post_filter` parameter to filter search results. + * The search hits are filtered after the aggregations are calculated. + * A post filter has no impact on the aggregation results. */ post_filter?: QueryDslQueryContainer + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. + * NOTE: This is a debugging tool and adds significant overhead to search execution. */ profile?: boolean + /** The search definition using the Query DSL. */ query?: QueryDslQueryContainer + /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ rescore?: SearchRescore | SearchRescore[] + /** A retriever is a specification to describe top documents returned from a search. + * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ retriever?: RetrieverContainer + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record + /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults + /** The number of hits to return, which must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+ * To page through more hits, use the `search_after` property. */ size?: integer + /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll + /** A comma-separated list of `<field>:<direction>` pairs. */ sort?: Sort + /** The source fields that are returned for matching documents. + * These fields are returned in the `hits._source` property of the search response. + * If the `stored_fields` property is specified, the `_source` property defaults to `false`. + * Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig + /** An array of wildcard (`*`) field patterns. + * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this property to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. + * + * If set to `0` (default), the query does not terminate early. */ terminate_after?: long + /** The period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string + /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If `true`, the request returns the document version as part of a hit. */ version?: boolean + /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` property defaults to `false`. + * You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields + /** Limit the search to a point in time (PIT). + * If you provide a PIT, you cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference + /** One or more runtime fields in the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** The stats groups to associate with the search. + * Each group maintains a statistics aggregation for its associated searches. + * You can retrieve these stats using the indices stats API. */ stats?: string[] + /** All values in `body` will be added to the request body.
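Since the `from`/`size` window is capped at 10,000 hits, deep pagination combines `sort`, `search_after`, and a point in time (PIT), as documented above. A sketch under the same assumptions (hypothetical index and sort field):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Open a PIT so all pages see a consistent view of the index.
const pit = await client.openPointInTime({ index: 'articles', keep_alive: '1m' })

let searchAfter: (string | number)[] | undefined
for (let page = 0; page < 3; page++) {
  const res = await client.search({
    size: 100,
    pit: { id: pit.id, keep_alive: '1m' },
    sort: [{ published_at: 'desc' }, { _shard_doc: 'asc' }], // _shard_doc as a tiebreaker
    search_after: searchAfter
  })
  if (res.hits.hits.length === 0) break
  // Feed the last hit's sort values into the next request.
  searchAfter = res.hits.hits[res.hits.hits.length - 1].sort as (string | number)[]
}

await client.closePointInTime({ id: pit.id })
```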
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, min_compatible_shard_node?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, min_compatible_shard_node?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type SearchResponse> = SearchResponseBody export interface SearchResponseBody> { + /** The number of milliseconds it took Elasticsearch to run the request. + * This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response. 
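The `body` and `querystring` escape hatches above accept arbitrary key/value pairs (the `never` intersections only forbid redefining the typed parameters), which can be useful when a cluster exposes an option the client's types do not yet know about. A sketch; `some_future_flag` is a made-up parameter, not a real Elasticsearch option:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  // Raw payload passed through as-is; keys here must not repeat the typed params.
  body: { query: { match_all: {} } },
  // Extra query-string parameters, e.g. one newer than the client's type definitions.
  querystring: { some_future_flag: true } // hypothetical flag, for illustration only
})
```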
+ * It includes: + * + * * Communication time between the coordinating node and data nodes + * * Time the request spends in the search thread pool, queued for execution + * * Actual run time + * + * It does not include: + * + * * Time needed to send the request to Elasticsearch + * * Time needed to serialize the JSON response + * * Time needed to send the response to a client */ took: long + /** If `true`, the request timed out before completion; returned results may be partial or empty. */ timed_out: boolean + /** A count of shards used for the request. */ _shards: ShardStatistics + /** The returned documents and metadata. */ hits: SearchHitsMetadata aggregations?: TAggregations _clusters?: ClusterStatistics @@ -1239,6 +2362,9 @@ export interface SearchResponseBody[]> terminated_early?: boolean @@ -1320,10 +2446,20 @@ export interface SearchCollector { } export interface SearchCompletionContext { + /** The factor by which the score of the suggestion should be boosted. + * The score is computed by multiplying the boost with the suggestion weight. */ boost?: double + /** The value of the category to filter/boost on. */ context: SearchContext + /** An array of precision values at which neighboring geohashes should be taken into account. + * Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`). + * Defaults to generating neighbors for index time precision level. */ neighbours?: GeoHashPrecision[] + /** The precision of the geohash to encode the query geo point. + * Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`). + * Defaults to index time precision level. */ precision?: GeoHashPrecision + /** Whether the category value should be treated as a prefix or not. */ prefix?: boolean } @@ -1345,9 +2481,13 @@ export interface SearchCompletionSuggestOption { } export interface SearchCompletionSuggester extends SearchSuggesterBase { + /** A value, geo point object, or a geo hash string to filter or boost the suggestion on. */ contexts?: Record + /** Enables fuzziness, meaning you can have a typo in your search and still get results back. */ fuzzy?: SearchSuggestFuzziness + /** A regex query that expresses a prefix as a regular expression. */ regex?: SearchRegexOptions + /** Whether duplicate suggestions should be filtered out. */ skip_duplicates?: boolean } @@ -1387,16 +2527,38 @@ export interface SearchDfsStatisticsProfile { } export interface SearchDirectGenerator { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the `shard_size` in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: float + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * This can be used to exclude high frequency terms, which are usually spelled correctly, from being spellchecked. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than `1` is specified, the value cannot be fractional. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in.
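Before the generator options continue, a sketch of the completion suggester defined above, assuming a hypothetical `suggest` field mapped with the `completion` type; the fuzzy options let a typo in the prefix still return results:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  suggest: {
    'title-suggest': {
      prefix: 'elastc',               // note the typo: fuzzy matching still finds candidates
      completion: {
        field: 'suggest',             // hypothetical completion-mapped field
        fuzzy: { fuzziness: 1, min_length: 4 },
        skip_duplicates: true,
        size: 5
      }
    }
  }
})

console.log(res.suggest?.['title-suggest'])
```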
+ * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than `1` is specified, the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */ post_filter?: string + /** A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. + * This filter is applied to the original token before candidates are generated. */ pre_filter?: string + /** The number of minimal prefix characters that must match in order to be a candidate suggestion. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** The maximum corrections to be returned per suggest text token. */ size?: integer + /** Controls which of the suggestions generated on each shard are included. */ suggest_mode?: SuggestMode } @@ -1426,18 +2588,28 @@ export interface SearchFetchProfileDebug { } export interface SearchFieldCollapse { + /** The field to collapse the result set on. */ field: Field + /** The number of inner hits and their sort order. */ inner_hits?: SearchInnerHits | SearchInnerHits[] + /** The number of concurrent requests allowed to retrieve the inner_hits per group. */ max_concurrent_group_searches?: integer collapse?: SearchFieldCollapse } export interface SearchFieldSuggester { + /** Provides auto-complete/search-as-you-type functionality. */ completion?: SearchCompletionSuggester + /** Provides access to word alternatives on a per token basis within a certain string distance. */ phrase?: SearchPhraseSuggester + /** Suggests terms based on edit distance. */ term?: SearchTermSuggester + /** Prefix used to search for suggestions. */ prefix?: string + /** A prefix expressed as a regular expression. */ regex?: string + /** The text to use as input for the suggester. + * Needs to be set globally or per suggestion. */ text?: string } @@ -1448,25 +2620,61 @@ export interface SearchHighlight extends SearchHighlightBase { export interface SearchHighlightBase { type?: SearchHighlighterType + /** A string that contains each boundary character. */ boundary_chars?: string + /** How far to scan for boundary characters. */ boundary_max_scan?: integer + /** Specifies how to break the highlighted fragments: `chars`, `sentence`, or `word`. + * Only valid for the `unified` and `fvh` highlighters. + * Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. */ boundary_scanner?: SearchBoundaryScanner + /** Controls which locale is used to search for sentence and word boundaries. + * This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */ boundary_scanner_locale?: string force_source?: boolean + /** Specifies how text should be broken up in highlight snippets: `simple` or `span`. + * Only valid for the `plain` highlighter. */ fragmenter?: SearchHighlighterFragmenter + /** The size of the highlighted fragment in characters. */ fragment_size?: integer highlight_filter?: boolean + /** Highlight matches for a query other than the search query. + * This is especially useful if you use a rescore query because those are not taken into account by highlighting by default.
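As an illustration of the `SearchFieldCollapse` options above, a minimal sketch that collapses to one top hit per author and inlines the three most recent posts per group (index and field names are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',                  // hypothetical index and fields
  query: { match: { body: 'search' } },
  collapse: {
    field: 'author.id',               // one result per distinct author.id
    inner_hits: { name: 'recent', size: 3, sort: [{ published_at: 'desc' }] },
    max_concurrent_group_searches: 4  // throttle the inner_hits sub-requests
  }
})
```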
*/ highlight_query?: QueryDslQueryContainer max_fragment_length?: integer + /** If set to a non-negative value, highlighting stops at this defined maximum limit. + * The rest of the text is not processed, thus not highlighted, and no error is returned. + * The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to a lower value than the query setting. */ max_analyzed_offset?: integer + /** The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */ no_match_size?: integer + /** The maximum number of fragments to return. + * If the number of fragments is set to `0`, no fragments are returned. + * Instead, the entire field contents are highlighted and returned. + * This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. + * If `number_of_fragments` is `0`, `fragment_size` is ignored. */ number_of_fragments?: integer options?: Record + /** Sorts highlighted fragments by score when set to `score`. + * By default, fragments will be output in the order they appear in the field (order: `none`). + * Setting this option to `score` will output the most relevant fragments first. + * Each highlighter applies its own logic to compute relevancy scores. */ order?: SearchHighlighterOrder + /** Controls the number of matching phrases in a document that are considered. + * Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. + * When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. + * Only supported by the `fvh` highlighter. */ phrase_limit?: integer + /** Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */ post_tags?: string[] + /** Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */ pre_tags?: string[] + /** By default, only fields that contain a query match are highlighted. + * Set to `false` to highlight all fields. */ require_field_match?: boolean + /** Set to `styled` to use the built-in tag schema. */ tags_schema?: SearchHighlighterTagsSchema } @@ -1509,14 +2717,19 @@ export interface SearchHit { } export interface SearchHitsMetadata { + /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long hits: SearchHit[] max_score?: double | null } export interface SearchInnerHits { + /** The name for the particular inner hit definition in the response. + * Useful when a search request contains multiple inner hits. */ name?: Name + /** The maximum number of hits to return per `inner_hits`. */ size?: integer + /** Inner hit starting document offset. */ from?: integer collapse?: SearchFieldCollapse docvalue_fields?: (QueryDslFieldAndFormat | Field)[] @@ -1526,6 +2739,8 @@ export interface SearchInnerHits { script_fields?: Record seq_no_primary_term?: boolean fields?: Field[] + /** How the inner hits should be sorted per `inner_hits`. + * By default, inner hits are sorted by score.
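A sketch of the highlighting options documented above, overriding the default `<em>`/`</em>` wrapping (index and field names are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  query: { match: { body: 'vector tile' } },
  highlight: {
    pre_tags: ['<mark>'],             // replaces the default <em> wrapping
    post_tags: ['</mark>'],
    fields: {
      body: { fragment_size: 150, number_of_fragments: 3, order: 'score' }
    }
  }
})

for (const hit of res.hits.hits) console.log(hit.highlight?.body)
```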
*/ sort?: Sort _source?: SearchSourceConfig stored_fields?: Fields @@ -1579,11 +2794,14 @@ export interface SearchKnnQueryProfileResult { } export interface SearchLaplaceSmoothingModel { + /** A constant that is added to all counts to balance weights. */ alpha: double } export interface SearchLearningToRank { + /** The unique identifier of the trained model uploaded to Elasticsearch. */ model_id: string + /** Named parameters to be passed to the query templates used for feature */ params?: Record } @@ -1604,18 +2822,25 @@ export interface SearchPhraseSuggest extends SearchSuggestBase { } export interface SearchPhraseSuggestCollate { + /** Parameters to use if the query is templated. */ params?: Record + /** Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */ prune?: boolean + /** A collate query that is run once for every suggestion. */ query: SearchPhraseSuggestCollateQuery } export interface SearchPhraseSuggestCollateQuery { + /** The search template ID. */ id?: Id + /** The query source. */ source?: string } export interface SearchPhraseSuggestHighlight { + /** Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */ post_tag: string + /** Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */ pre_tag: string } @@ -1627,17 +2852,35 @@ export interface SearchPhraseSuggestOption { } export interface SearchPhraseSuggester extends SearchSuggesterBase { + /** Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */ collate?: SearchPhraseSuggestCollate + /** Defines a factor applied to the input phrase's score, which is used as a threshold for other suggest candidates. + * Only candidates that score higher than the threshold will be included in the result. */ confidence?: double + /** A list of candidate generators that produce a list of possible terms per term in the given text. */ direct_generator?: SearchDirectGenerator[] force_unigrams?: boolean + /** Sets max size of the n-grams (shingles) in the field. + * If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`. + * If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */ gram_size?: integer + /** Sets up suggestion highlighting. + * If not provided, no highlighted field is returned. */ highlight?: SearchPhraseSuggestHighlight + /** The maximum percentage of the terms considered to be misspellings in order to form a correction. + * This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */ max_errors?: double + /** The likelihood of a term being misspelled even if the term exists in the dictionary. */ real_word_error_likelihood?: double + /** The separator that is used to separate terms in the bigram field. + * If not set, the whitespace character is used as a separator. */ separator?: string + /** Sets the maximum number of suggested terms to be retrieved from each individual shard. */ shard_size?: integer + /** The smoothing model used to balance weight between infrequent grams (grams (shingles) that do not exist in the index) and frequent grams (those that appear at least once in the index). + * The default model is Stupid Backoff. */ smoothing?: SearchSmoothingModelContainer + /** The text/query to provide suggestions for.
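A sketch of the phrase suggester options above, combining a direct generator, suggestion highlighting, and a collate query; `title.trigram` stands in for a hypothetical shingled subfield:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  size: 0,                            // suggestions only, no hits
  suggest: {
    text: 'nobel prize winer',        // global suggest text
    'did-you-mean': {
      phrase: {
        field: 'title.trigram',       // hypothetical shingled subfield
        gram_size: 3,
        direct_generator: [{ field: 'title.trigram', suggest_mode: 'always' }],
        highlight: { pre_tag: '<em>', post_tag: '</em>' },
        collate: {
          // Run once per suggestion; prune: true keeps non-matching phrases flagged.
          query: { source: '{"match": {"{{field_name}}": "{{suggestion}}"}}' },
          params: { field_name: 'title' },
          prune: true
        }
      }
    }
  }
})
```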
*/ text?: string token_limit?: integer } @@ -1683,7 +2926,9 @@ export interface SearchQueryProfile { } export interface SearchRegexOptions { + /** Optional operators for the regular expression. */ flags?: integer | string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer } @@ -1694,9 +2939,14 @@ export interface SearchRescore { } export interface SearchRescoreQuery { + /** The query to use for rescoring. + * This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */ rescore_query: QueryDslQueryContainer + /** Relative importance of the original query versus the rescore query. */ query_weight?: double + /** Relative importance of the rescore query versus the original query. */ rescore_query_weight?: double + /** Determines how scores are combined. */ score_mode?: SearchScoreMode } @@ -1721,8 +2971,11 @@ export interface SearchShardProfile { } export interface SearchSmoothingModelContainer { + /** A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */ laplace?: SearchLaplaceSmoothingModel + /** A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */ linear_interpolation?: SearchLinearInterpolationSmoothingModel + /** A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. */ stupid_backoff?: SearchStupidBackoffSmoothingModel } @@ -1731,16 +2984,27 @@ export type SearchSourceConfig = boolean | SearchSourceFilter | Fields export type SearchSourceConfigParam = boolean | Fields export interface SearchSourceFilter { + /** If `true`, vector fields are excluded from the returned source. + * + * This option takes precedence over `includes`: any vector field will + * remain excluded even if it matches an `includes` rule. */ exclude_vectors?: boolean + /** A list of fields to exclude from the returned source. */ excludes?: Fields + /** A list of fields to exclude from the returned source. + * @alias excludes */ exclude?: Fields + /** A list of fields to include in the returned source. */ includes?: Fields + /** A list of fields to include in the returned source. + * @alias includes */ include?: Fields } export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' export interface SearchStupidBackoffSmoothingModel { + /** A constant factor that the lower order n-gram model is discounted by. */ discount: double } @@ -1753,24 +3017,36 @@ export interface SearchSuggestBase { } export interface SearchSuggestFuzziness { + /** The fuzziness factor. */ fuzziness?: Fuzziness + /** Minimum length of the input before fuzzy suggestions are returned. */ min_length?: integer + /** Minimum length of the input, which is not checked for fuzzy alternatives. */ prefix_length?: integer + /** If set to `true`, transpositions are counted as one change instead of two. */ transpositions?: boolean + /** If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes. + * This is slightly slower than raw bytes. 
*/ unicode_aware?: boolean } export type SearchSuggestSort = 'score' | 'frequency' export interface SearchSuggesterKeys { + /** Global suggest text, to avoid repetition when the same text is used in several suggesters. */ text?: string } export type SearchSuggester = SearchSuggesterKeys & { [property: string]: SearchFieldSuggester | string } export interface SearchSuggesterBase { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The analyzer to analyze the suggest text with. + * Defaults to the search analyzer of the suggest field. */ analyzer?: string + /** The maximum corrections to be returned per suggest text token. */ size?: integer } @@ -1788,16 +3064,36 @@ export interface SearchTermSuggestOption { export interface SearchTermSuggester extends SearchSuggesterBase { lowercase_terms?: boolean + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the `shard_size` in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: integer + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than `1` is specified, the value cannot be fractional. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in. + * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than `1` is specified, the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** The number of minimal prefix characters that must match in order to be a candidate for suggestions. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** Sets the maximum number of suggestions to be retrieved from each individual shard. */ shard_size?: integer + /** Defines how suggestions should be sorted per suggest text term. */ sort?: SearchSuggestSort + /** The string distance implementation to use for comparing how similar suggested terms are. */ string_distance?: SearchStringDistance + /** Controls which suggestions are included, or for which suggest text terms suggestions should be returned. */ suggest_mode?: SuggestMode + /** The suggest text. + * Needs to be set globally or per suggestion. */ text?: string } @@ -1811,25 +3107,93 @@ export type SearchTotalHitsRelation = 'eq' | 'gte' export type SearchTrackHits = boolean | integer export interface SearchMvtRequest extends RequestBase { + /** Comma-separated list of data streams, indices, or aliases to search. */ index: Indices + /** Field containing geospatial data to return. */ field: Field + /** Zoom level for the vector tile to search. */ zoom: SearchMvtZoomLevel + /** X coordinate for the vector tile to search. */ x: SearchMvtCoordinate + /** Y coordinate for the vector tile to search. */ y: SearchMvtCoordinate + /** Sub-aggregations for the `geotile_grid`.
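Before moving on to the vector tile API: a sketch of the term suggester defined above for simple spellcheck-style corrections (index and field names are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  size: 0,
  suggest: {
    spelling: {
      text: 'serach',                 // misspelled input
      term: {
        field: 'body',
        suggest_mode: 'popular',      // only suggest terms more frequent than the input
        max_edits: 2,
        prefix_length: 1,
        min_word_length: 4,
        string_distance: 'jaro_winkler'
      }
    }
  }
})
```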
+ * It supports the following aggregation types: + * + * - `avg` + * - `boxplot` + * - `cardinality` + * - `extended stats` + * - `max` + * - `median absolute deviation` + * - `min` + * - `percentile` + * - `percentile-rank` + * - `stats` + * - `sum` + * - `value count` + * + * The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ aggs?: Record + /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers + * to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer + /** If `false`, the meta layer's feature is the bounding box of the tile. + * If `true`, the meta layer's feature is a bounding box resulting from a + * `geo_bounds` aggregation. The aggregation runs on values that intersect + * the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting + * bounding box may be larger than the vector tile. */ exact_bounds?: boolean + /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer + /** The fields to return in the `hits` layer. + * It supports wildcards (`*`). + * This parameter does not support fields with array values. Fields with array + * values may return inconsistent results. */ fields?: Fields + /** The aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType + /** Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` + * and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results + * don't include the aggs layer. */ grid_precision?: integer + /** Determines the geometry type for features in the aggs layer. In the aggs layer, + * each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon + * of the cell's bounding box. If `point`, each feature is a Point that is the centroid + * of the cell. */ grid_type?: SearchMvtGridType + /** The query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** The maximum number of features to return in the hits layer. Accepts 0-10000. + * If 0, results don't include the hits layer. */ size?: integer + /** Sort the features in the hits layer. By default, the API calculates a bounding + * box for each feature. It sorts features based on this box's diagonal length, + * from longest to shortest. */ sort?: Sort + /** The number of hits matching the query to count accurately. If `true`, the exact number + * of hits is returned at the cost of some performance. If `false`, the response does + * not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits + /** If `true`, the hits and aggs layers will contain additional point features representing + * suggested label positions for the original features. + * + * * `Point` and `MultiPoint` features will have one of the points selected. + * * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. + * * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. + * * The aggregation results will provide one central point for each aggregation bucket.
+ * + * All attributes from the original features will also be copied to the new label features. + * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } } export type SearchMvtResponse = MapboxVectorTiles @@ -1843,14 +3207,35 @@ export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer export interface SearchShardsRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never time out. */ master_timeout?: Duration + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never } + /** All values in `querystring` will be added to the request querystring.
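A sketch of the `search_mvt` request described above; `museums` and `location` are illustrative names, and the response is a binary Mapbox vector tile rather than JSON:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch tile zoom=13, x=4207, y=2692 for a hypothetical geo_point field.
const tile = await client.searchMvt({
  index: 'museums',
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692,
  grid_precision: 2,
  grid_type: 'point',
  fields: ['name'],                  // extra per-hit attributes in the hits layer
  query: { term: { included: true } }
})

// `tile` is the raw protobuf-encoded vector tile; decode it with a library
// such as @mapbox/vector-tile before rendering.
console.log(tile.byteLength)
```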
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never } } export interface SearchShardsResponse { @@ -1860,10 +3245,14 @@ export interface SearchShardsResponse { } export interface SearchShardsSearchShardsNodeAttributes { + /** The human-readable identifier of the node. */ name: NodeName + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress external_id: string + /** Lists node attributes. */ attributes: Record roles: NodeRoles version: VersionString @@ -1877,23 +3266,58 @@ export interface SearchShardsShardStoreIndex { } export interface SearchTemplateRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** Specifies how long a consistent view of the index + * should be maintained for scrolled search. */ scroll?: Duration + /** The type of the search operation. */ search_type?: SearchType + /** If `true`, `hits.total` is rendered as an integer in the response. + * If `false`, it is rendered as an object. */ rest_total_hits_as_int?: boolean + /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean + /** If `true`, returns detailed information about score calculation as part of each hit. + * If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ explain?: boolean + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record + /** If `true`, the query execution is profiled. */ profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ source?: string + /** All values in `body` will be added to the request body. 
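To illustrate the search template parameters above, a sketch that stores a Mustache template and then runs it; the template ID, index, and parameters are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Store a Mustache template once...
await client.putScript({
  id: 'articles-by-title',           // hypothetical template ID
  script: {
    lang: 'mustache',
    source: '{"query": {"match": {"title": "{{query_string}}"}}, "size": {{size}}}'
  }
})

// ...then run it with parameters substituted server-side.
const res = await client.searchTemplate({
  index: 'articles',
  id: 'articles-by-title',
  params: { query_string: 'kibana', size: 5 }
})
```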
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } } export interface SearchTemplateResponse { @@ -1914,19 +3338,41 @@ export interface SearchTemplateResponse { } export interface TermsEnumRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and index aliases to search. + * Wildcard (`*`) expressions are supported. + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: IndexName + /** The field to search for matching terms. */ field: Field + /** The number of matching terms to return. */ size?: integer + /** The maximum length of time to spend collecting results. + * If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. */ timeout?: Duration + /** When `true`, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean + /** Filter an index shard if the provided query rewrites to `match_none`. */ index_filter?: QueryDslQueryContainer + /** The string to match at the start of indexed terms. + * If it is not provided, all terms in the field are considered. + * + * NOTE: The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ string?: string + /** The string after which terms in the index should be returned. + * It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ search_after?: string + /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ querystring?: { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } } export interface TermsEnumResponse { _shards: ShardStatistics terms: string[] + /** If `false`, the returned terms set may be incomplete and should be treated as approximate. + * This can occur due to a few reasons, such as a request timeout or a node error. */ complete: boolean } @@ -1937,32 +3383,77 @@ export interface TermvectorsFieldStatistics { } export interface TermvectorsFilter { + /** Ignore words which occur in more than this many docs. + * Defaults to unbounded.
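A sketch of the terms enum API above, useful for autocomplete-style lookups of indexed terms (index and field names are assumptions; the field must be a keyword-family field):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Look up indexed terms starting with "kib".
const res = await client.termsEnum({
  index: 'articles',                 // hypothetical index
  field: 'tags',                     // hypothetical keyword field
  string: 'kib',
  case_insensitive: true,
  size: 10
})

// complete: false means the term list may be partial (e.g. a timeout occurred).
console.log(res.terms, res.complete)
```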
*/ max_doc_freq?: integer + /** The maximum number of terms that must be returned per field. */ max_num_terms?: integer + /** Ignore words with more than this frequency in the source doc. + * It defaults to unbounded. */ max_term_freq?: integer + /** The maximum word length above which words will be ignored. + * Defaults to unbounded. */ max_word_length?: integer + /** Ignore terms which do not occur in at least this many docs. */ min_doc_freq?: integer + /** Ignore words with less than this frequency in the source doc. */ min_term_freq?: integer + /** The minimum word length below which words will be ignored. */ min_word_length?: integer } export interface TermvectorsRequest extends RequestBase { + /** The name of the index that contains the document. */ index: IndexName + /** A unique identifier for the document. */ id?: Id + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: TDocument + /** Filter terms based on their tf-idf scores. + * This could be useful in order to find out a good characteristic vector of a document. + * This feature works in a similar manner to the second phase of the More Like This Query. */ filter?: TermvectorsFilter + /** Override the default per-field analyzer. + * This is useful in order to generate term vectors in any fashion, especially when using artificial documents. + * When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ per_field_analyzer?: Record + /** A list of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Field[] + /** If `true`, the response includes: + * + * * The document count (how many documents contain this field). + * * The sum of document frequencies (the sum of document frequencies for all terms in this field). + * * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** If `true`, the response includes: + * + * * The total term frequency (how often a term occurs in all documents). + * * The document frequency (the number of documents containing the current term). + * + * By default these values are not returned since term statistics can have a serious performance impact. */ term_statistics?: boolean + /** A custom value that is used to route operations to a specific shard. */ routing?: Routing + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** The version type. */ version_type?: VersionType + /** All values in `body` will be added to the request body.
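A sketch of the termvectors request above, opting into the costlier term statistics and pruning the output with a filter (index, document ID, and field are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.termvectors({
  index: 'articles',                 // hypothetical index
  id: '1',
  fields: ['body'],
  positions: true,
  offsets: true,
  term_statistics: true,             // opt in: these stats are expensive to compute
  field_statistics: true,
  filter: { max_num_terms: 25, min_term_freq: 1, min_doc_freq: 1 }
})

console.log(res.term_vectors?.body?.terms)
```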
*/ + body?: string | { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } } export interface TermvectorsResponse { @@ -1995,27 +3486,63 @@ export interface TermvectorsToken { } export interface UpdateRequest extends RequestBase { + /** A unique identifier for the document to be updated. */ id: Id + /** The name of the target index. + * By default, the index is created automatically if it doesn't exist. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** If `true`, the document source is included in the error message in case of parsing errors. */ include_source_on_error?: boolean + /** The script language. */ lang?: string + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh + /** If `true`, the destination must be an index alias. */ require_alias?: boolean + /** The number of times the operation should be retried when a conflict occurs. */ retry_on_conflict?: integer + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. + * Elasticsearch waits for at least the timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** The number of copies of each shard that must be active before proceeding with the operation. + * Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards + /** The source fields you want to exclude. */ _source_excludes?: Fields + /** The source fields you want to retrieve. */ _source_includes?: Fields + /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. */ detect_noop?: boolean + /** A partial update to an existing document. + * If both `doc` and `script` are specified, `doc` is ignored. */ doc?: TPartialDocument + /** If `true`, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ doc_as_upsert?: boolean + /** The script to run to update the document. */ script?: Script | string + /** If `true`, run the script whether or not the document exists. */ scripted_upsert?: boolean + /** If `false`, turn off source retrieval.
+ * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig + /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. + * If the document exists, the 'script' is run. */ upsert?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } } export type UpdateResponse = UpdateUpdateWriteResponseBase @@ -2025,65 +3552,163 @@ export interface UpdateUpdateWriteResponseBase extends Writ } export interface UpdateByQueryRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Skips the specified number of documents. */ from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
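To make the update request above concrete, a sketch that increments a counter with a script and falls back to `upsert` when the document does not exist yet (index, ID, and field are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.update({
  index: 'page-views',               // hypothetical index
  id: 'home',
  script: {
    source: 'ctx._source.count += params.by',
    params: { by: 1 }
  },
  upsert: { count: 1 },              // used only when the document is missing
  retry_on_conflict: 3               // retry on concurrent-update version conflicts
})

console.log(res.result)              // 'created', 'updated', or 'noop'
```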
+ * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string + /** A query in the Lucene query string syntax. */ q?: string + /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. + * This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ refresh?: boolean + /** If `true`, the request cache is used for this request. + * It defaults to the index-level setting. */ request_cache?: boolean + /** The throttle for this request in sub-requests per second. */ requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ routing?: Routing + /** The period to retain the search context for scrolling. */ scroll?: Duration + /** The size of the scroll request that powers the operation. */ scroll_size?: long + /** An explicit timeout for each search request. + * By default, there is no timeout. */ search_timeout?: Duration + /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType + /** The number of slices this task should be divided into. */ slices?: Slices + /** A comma-separated list of `<field>:<direction>` pairs. */ sort?: string[] + /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long + /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. + * By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration + /** If `true`, returns the document version as part of a hit. */ version?: boolean + /** Should the document increment the version number (internal) on hit or not (reindex). */ version_type?: boolean + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` parameter controls how long each write request waits for unavailable shards to become available. + * Both work exactly the way they work in the bulk API. */ wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete.
+ * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. + * Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ wait_for_completion?: boolean + /** The maximum number of documents to update. */ max_docs?: long + /** The documents to update using the Query DSL. */ query?: QueryDslQueryContainer + /** The script to run to update the document source or metadata when updating. */ script?: Script | string + /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } } export interface UpdateByQueryResponse { + /** The number of scroll responses pulled back by the update by query. */ batches?: long + /** Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. + * Update by query is implemented using batches. + * Any failure causes the entire process to end, but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the operation from ending when version conflicts occur. */ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. */ noops?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** The number of requests per second effectively run during the update by query. */ requests_per_second?: float + /** The number of retries attempted by update by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried.
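A sketch of an asynchronous update by query as described above: with `wait_for_completion: false` the call returns a task ID, which can then be polled through the tasks API (index, query, and script are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Archive drafts in the background; returns a task ID instead of blocking.
const { task } = await client.updateByQuery({
  index: 'articles',
  query: { term: { status: 'draft' } },
  script: { source: "ctx._source.status = 'archived'" },
  conflicts: 'proceed',              // don't abort on version conflicts
  wait_for_completion: false
})

// Poll the tasks API for progress; the task record lives under .tasks.
const status = await client.tasks.get({ task_id: String(task) })
console.log(status.completed)
```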
*/ retries?: Retries task?: TaskId + /** If true, some requests timed out during the update by query. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. */ updated?: long + /** The number of version conflicts that the update by query hit. */ version_conflicts?: long throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in an _update_by_query response. + * It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue } export interface UpdateByQueryRethrottleRequest extends RequestBase { + /** The ID for the task. */ task_id: Id + /** The throttle for this request in sub-requests per second. + * To turn off throttling, set it to `-1`. */ requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } } export interface UpdateByQueryRethrottleResponse { @@ -2112,6 +3737,7 @@ export type SpecUtilsStringified = T | string export type SpecUtilsWithNullValue = T | SpecUtilsNullValue export interface AcknowledgedResponseBase { + /** For a successful response, this value is always true. On failure, an exception is returned instead. */ acknowledged: boolean } @@ -2170,7 +3796,9 @@ export interface ClusterStatistics { } export interface CompletionStats { + /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ size_in_bytes: long + /** Total amount of memory used for completion across all shards assigned to selected nodes. */ size?: ByteSize fields?: Record } @@ -2205,9 +3833,17 @@ export type Distance = string export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' export interface DocStats { + /** Total number of non-deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments and may include documents from nested fields. */ count: long + /** Total number of deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments. + * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */ deleted?: long + /** Returns the total size in bytes of all documents in this stats. + * This value may be more reliable than store_stats.size_in_bytes in estimating the index size. */ total_size_in_bytes: long + /** Human readable total_size_in_bytes */ total_size?: ByteSize } @@ -2218,14 +3854,28 @@ export type DurationLarge = string export type DurationValue = Unit export interface ElasticsearchVersionInfo { + /** The Elasticsearch Git commit's date. */ build_date: DateTime + /** The build flavor. For example, `default`. */ build_flavor: string + /** The Elasticsearch Git commit's SHA hash. 
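Because `wait_for_completion: false` returns a task ID, the throttle of a running update can be changed afterwards with `updateByQueryRethrottle`. A sketch under the same assumptions as the previous example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Launch the update asynchronously, then adjust its throttle while it runs.
const { task } = await client.updateByQuery({
  index: 'my-index', // hypothetical index name
  wait_for_completion: false,
  requests_per_second: 10,
  query: { match_all: {} }
})

await client.updateByQueryRethrottle({
  task_id: String(task), // task ID returned by the initial request
  requests_per_second: -1 // -1 turns throttling off
})
```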
*/ build_hash: string + /** Indicates whether the Elasticsearch build was a snapshot. */ build_snapshot: boolean + /** The build type that corresponds to how Elasticsearch was installed. + * For example, `docker`, `rpm`, or `tar`. */ build_type: string + /** The version number of Elasticsearch's underlying Lucene software. */ lucene_version: VersionString + /** The minimum index version with which the responding node can read from disk. */ minimum_index_compatibility_version: VersionString + /** The minimum node version with which the responding node can communicate. + * Also the minimum version from which you can perform a rolling upgrade. */ minimum_wire_compatibility_version: VersionString + /** The Elasticsearch version number. + * + * IMPORTANT: For Serverless deployments, this static value is always `8.11.0` and is used solely for backward compatibility with legacy clients. + * Serverless environments are versionless and automatically upgraded, so this value can be safely ignored. */ number: string } @@ -2242,8 +3892,11 @@ export interface EmptyObject { export type EpochTime = Unit export interface ErrorCauseKeys { + /** The type of error. */ type: string + /** A human-readable explanation of the error, in English. */ reason?: string | null + /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */ stack_trace?: string caused_by?: ErrorCause root_cause?: ErrorCause[] @@ -2332,7 +3985,9 @@ export type GeoHashPrecision = number | string export type GeoHexCell = string export interface GeoLine { + /** Always `"LineString"` */ type: string + /** Array of `[lon, lat]` coordinates */ coordinates: double[][] } @@ -2416,9 +4071,17 @@ export interface IndexingStats { export type Indices = IndexName | IndexName[] export interface IndicesOptions { + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean } @@ -2446,41 +4109,71 @@ export interface InnerRetriever { export type Ip = string export interface KnnQuery extends QueryDslQueryBase { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both.
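`ElasticsearchVersionInfo` is what the root endpoint returns under `version`; a quick way to inspect it with the client (sketch, assuming a local node):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The root endpoint response carries an ElasticsearchVersionInfo under `version`.
const info = await client.info()
console.log(info.version.number) // always '8.11.0' on Serverless
console.log(info.version.build_flavor, info.version.minimum_wire_compatibility_version)
```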
*/ query_vector_builder?: QueryVectorBuilder + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors. + * @experimental */ rescore_vector?: RescoreVector } export interface KnnRetriever extends RetrieverBase { + /** The name of the vector field to search against. */ field: string + /** Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */ query_vector?: QueryVector + /** Defines a model to build a query vector. */ query_vector_builder?: QueryVectorBuilder + /** Number of nearest neighbors to return as top hits. */ k: integer + /** Number of nearest neighbor candidates to consider per shard. */ num_candidates: integer + /** The minimum similarity required for a document to be considered a match. */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors. + * @experimental */ rescore_vector?: RescoreVector } export interface KnnSearch { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ query_vector_builder?: QueryVectorBuilder + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** Boost value to apply to kNN scores */ boost?: float + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Apply oversampling and rescoring to quantized vectors. + * @experimental */ rescore_vector?: RescoreVector } export interface LatLonGeoLocation { + /** Latitude */ lat: double + /** Longitude */ lon: double } @@ -2489,6 +4182,7 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' export interface LinearRetriever extends RetrieverBase { + /** Inner retrievers. */ retrievers?: InnerRetriever[] rank_window_size?: integer query?: string @@ -2539,10 +4233,15 @@ export interface NestedSortValue { } export interface NodeAttributes { + /** Lists node attributes. */ attributes: Record + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The unique identifier of the node. */ id?: NodeId + /** The unique identifier of the node. */ name: NodeName + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress } @@ -2571,8 +4270,11 @@ export interface NodeShard { export interface NodeStatistics { failures?: ErrorCause[] + /** Total number of nodes selected by the request. */ total: integer + /** Number of nodes that responded successfully to the request. */ successful: integer + /** Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response.
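`KnnSearch` corresponds to the top-level `knn` search option. A sketch, assuming a hypothetical index `my-vectors` with a 3-dimensional `dense_vector` field named `embedding`:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The query vector must match the dimension count of the dense_vector field.
const result = await client.search({
  index: 'my-vectors', // hypothetical index name
  knn: {
    field: 'embedding', // hypothetical dense_vector field
    query_vector: [0.1, 0.2, 0.3],
    k: 10,
    num_candidates: 100,
    filter: { term: { lang: 'en' } } // narrow candidates before scoring
  }
})
```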
*/ failed: integer } @@ -2585,6 +4287,7 @@ export type Password = string export type Percentage = string | float export interface PinnedRetriever extends RetrieverBase { + /** Inner retriever. */ retriever: RetrieverContainer ids?: string[] docs?: SpecifiedDocument[] @@ -2608,13 +4311,22 @@ export interface PluginStats { export type PropertyName = string export interface QueryCacheStats { + /** Total number of entries added to the query cache across all shards assigned to selected nodes. + * This number includes current and evicted entries. */ cache_count: long + /** Total number of entries currently in the query cache across all shards assigned to selected nodes. */ cache_size: long + /** Total number of query cache evictions across all shards assigned to selected nodes. */ evictions: long + /** Total count of query cache hits across all shards assigned to selected nodes. */ hit_count: long + /** Total amount of memory used for the query cache across all shards assigned to selected nodes. */ memory_size?: ByteSize + /** Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */ memory_size_in_bytes: long + /** Total count of query cache misses across all shards assigned to selected nodes. */ miss_count: long + /** Total count of hits and misses in the query cache across all shards assigned to selected nodes. */ total_count: long } @@ -2625,8 +4337,11 @@ export interface QueryVectorBuilder { } export interface RRFRetriever extends RetrieverBase { + /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */ retrievers: RetrieverContainer[] + /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ rank_constant?: integer + /** This value determines the size of the individual result sets per query. */ rank_window_size?: integer query?: string fields?: string[] @@ -2636,6 +4351,7 @@ export interface RankBase { } export interface RankContainer { + /** The reciprocal rank fusion parameters */ rrf?: RrfRank } @@ -2675,10 +4391,12 @@ export interface RequestCacheStats { } export interface RescoreVector { + /** Applies the specified oversample factor to k on the approximate kNN search */ oversample: float } export interface RescorerRetriever extends RetrieverBase { + /** Inner retriever. */ retriever: RetrieverContainer rescore: SearchRescore | SearchRescore[] } @@ -2686,38 +4404,58 @@ export interface RescorerRetriever extends RetrieverBase { export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { + /** The number of bulk actions retried. */ bulk: long + /** The number of search actions retried. */ search: long } export interface RetrieverBase { + /** Query to filter the documents that can match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ min_score?: float + /** Retriever name. */ _name?: string } export interface RetrieverContainer { + /** A retriever that replaces the functionality of a traditional query. */ standard?: StandardRetriever + /** A retriever that replaces the functionality of a knn search. */ knn?: KnnRetriever + /** A retriever that produces top documents from reciprocal rank fusion (RRF). 
*/ rrf?: RRFRetriever + /** A retriever that reranks the top documents based on a reranking model using the InferenceAPI */ text_similarity_reranker?: TextSimilarityReranker + /** A retriever that replaces the functionality of a rule query. */ rule?: RuleRetriever + /** A retriever that re-scores only the results produced by its child retriever. */ rescorer?: RescorerRetriever + /** A retriever that supports the combination of different retrievers through a weighted linear combination. */ linear?: LinearRetriever + /** A pinned retriever applies pinned documents to the underlying retriever. + * This retriever will rewrite to a PinnedQueryBuilder. */ pinned?: PinnedRetriever } export type Routing = string export interface RrfRank { + /** How much influence documents in individual result sets per query have over the final ranked result set */ rank_constant?: long + /** Size of the individual result sets per query */ rank_window_size?: long } export interface RuleRetriever extends RetrieverBase { + /** The ruleset IDs containing the rules this retriever is evaluating against. */ ruleset_ids: Id | Id[] + /** The match criteria that will determine if a rule in the provided rulesets should be applied. */ match_criteria: any + /** The retriever whose results rules should be applied to. */ retriever: RetrieverContainer + /** This value determines the size of the individual result set. */ rank_window_size?: integer } @@ -2730,9 +4468,14 @@ export interface ScoreSort { } export interface Script { + /** The script source. */ source?: string + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } @@ -2794,28 +4537,53 @@ export interface SearchTransform { export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' export interface SegmentsStats { + /** Total number of segments across all shards assigned to selected nodes. */ count: integer + /** Total amount of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory?: ByteSize + /** Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory_in_bytes: long + /** This object is not populated by the cluster stats API. + * To get information on segment files, use the node stats API. */ file_sizes: Record + /** Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. + * Fixed bit sets are used for nested object field types and type filters for join fields. */ fixed_bit_set?: ByteSize + /** Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */ fixed_bit_set_memory_in_bytes: long + /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory?: ByteSize + /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory_in_bytes: long + /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */ max_unsafe_auto_id_timestamp: long + /** Total amount of memory used for segments across all shards assigned to selected nodes. */ memory?: ByteSize + /** Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. 
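The retriever variants above compose into a tree passed as the search request's `retriever` option. A sketch fusing a lexical and a vector retriever with RRF, assuming a deployment recent enough to support retrievers and hypothetical index and field names:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Combine a standard (lexical) retriever with a kNN retriever via RRF.
const result = await client.search({
  index: 'my-index', // hypothetical index name
  retriever: {
    rrf: {
      rank_constant: 60,
      rank_window_size: 100,
      retrievers: [
        { standard: { query: { match: { title: 'vector database' } } } },
        {
          knn: {
            field: 'embedding', // hypothetical dense_vector field
            query_vector: [0.1, 0.2, 0.3],
            k: 50,
            num_candidates: 200
          }
        }
      ]
    }
  }
})
```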
*/ memory_in_bytes: long + /** Total amount of memory used for normalization factors across all shards assigned to selected nodes. */ norms_memory?: ByteSize + /** Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */ norms_memory_in_bytes: long + /** Total amount of memory used for points across all shards assigned to selected nodes. */ points_memory?: ByteSize + /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */ points_memory_in_bytes: long + /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ stored_fields_memory_in_bytes: long + /** Total amount of memory used for stored fields across all shards assigned to selected nodes. */ stored_fields_memory?: ByteSize + /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ terms_memory_in_bytes: long + /** Total amount of memory used for terms across all shards assigned to selected nodes. */ terms_memory?: ByteSize + /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */ term_vectors_memory?: ByteSize + /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ term_vectors_memory_in_bytes: long + /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */ version_map_memory?: ByteSize + /** Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */ version_map_memory_in_bytes: long } @@ -2832,8 +4600,11 @@ export interface ShardFailure { } export interface ShardStatistics { + /** The number of shards the operation or search attempted to run on but failed. */ failed: uint + /** The number of shards the operation or search succeeded on. */ successful: uint + /** The number of shards the operation or search will run on overall. */ total: uint failures?: ShardFailure[] skipped?: uint @@ -2878,25 +4649,42 @@ export interface SpecifiedDocument { } export interface StandardRetriever extends RetrieverBase { + /** Defines a query to retrieve a set of top documents. */ query?: QueryDslQueryContainer + /** Defines a search after object parameter used for pagination. */ search_after?: SortResults + /** Maximum number of documents to collect for each shard. */ terminate_after?: integer + /** A sort object that specifies the order of matching documents. */ sort?: Sort + /** Collapses the top documents by a specified key into a single top document per key. */ collapse?: SearchFieldCollapse } export interface StoreStats { + /** Total size of all shards assigned to selected nodes. */ size?: ByteSize + /** Total size, in bytes, of all shards assigned to selected nodes. */ size_in_bytes: long + /** A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ reserved?: ByteSize + /** A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ reserved_in_bytes: long + /** Total data set size of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size?: ByteSize + /** Total data set size, in bytes, of all shards assigned to selected nodes.
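The `params` guidance in `Script` is worth showing concretely: keeping `source` constant and varying only `params` lets Elasticsearch reuse the compiled script. A sketch with a hypothetical index and document ID:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The source string never changes, so it is compiled once;
// only the `params` values differ between requests.
await client.update({
  index: 'my-index', // hypothetical index name
  id: 'doc-1',       // hypothetical document ID
  script: {
    lang: 'painless',
    source: 'ctx._source.views += params.delta',
    params: { delta: 5 }
  }
})
```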
+ * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size_in_bytes?: long } export interface StoredScript { + /** The language the script is written in. + * For search templates, use `mustache`. */ lang: ScriptLanguage options?: Record + /** The script source. + * For search templates, an object containing the search template. */ source: string } @@ -2921,10 +4709,15 @@ export interface TextEmbedding { } export interface TextSimilarityReranker extends RetrieverBase { + /** The nested retriever which will produce the first-level results, that will later be used for reranking. */ retriever: RetrieverContainer + /** This value determines how many documents we will consider from the nested retriever. */ rank_window_size?: integer + /** Unique identifier of the inference endpoint created using the inference API. */ inference_id?: string + /** The text snippet used as the basis for similarity comparison */ inference_text: string + /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text */ field: string } @@ -2937,8 +4730,11 @@ export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' export type TimeZone = string export interface TokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ only_score_pruned_tokens?: boolean } @@ -3006,12 +4802,20 @@ export interface WktGeoBounds { } export interface WriteResponseBase { + /** The unique identifier for the added document. */ _id: Id + /** The name of the index the document was added to. */ _index: IndexName + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The result of the indexing operation: `created` or `updated`. */ result: Result + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Information about the replication process of the operation. */ _shards: ShardStatistics + /** The document version, which is incremented each time the document is updated. */ _version: VersionNumber forced_refresh?: boolean } @@ -3036,7 +4840,10 @@ export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiB } export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + /** Filters used to create buckets. + * At least one filter is required. */ filters?: Record + /** Separator used to concatenate filter names. Defaults to &. */ separator?: string } @@ -3058,92 +4865,199 @@ export interface AggregationsAggregation { } export interface AggregationsAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A bucket aggregation returning a form of adjacency matrix.
+ * The request provides a collection of named filter expressions, similar to the `filters` aggregation. + * Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. */ adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + /** A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ auto_date_histogram?: AggregationsAutoDateHistogramAggregation + /** A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */ avg?: AggregationsAverageAggregation + /** A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. + * The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */ avg_bucket?: AggregationsAverageBucketAggregation + /** A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */ boxplot?: AggregationsBoxplotAggregation + /** A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */ bucket_script?: AggregationsBucketScriptAggregation + /** A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */ bucket_selector?: AggregationsBucketSelectorAggregation + /** A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */ bucket_sort?: AggregationsBucketSortAggregation + /** A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. + * @experimental */ bucket_count_ks_test?: AggregationsBucketKsAggregation + /** A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. + * @experimental */ bucket_correlation?: AggregationsBucketCorrelationAggregation + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that groups semi-structured text into buckets. + * @experimental */ categorize_text?: AggregationsCategorizeTextAggregation + /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ children?: AggregationsChildrenAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */ cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + /** A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ cumulative_sum?: AggregationsCumulativeSumAggregation + /** A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. 
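As a sketch of how pipeline aggregations such as `bucket_selector` reference sibling metrics through `buckets_path` (index and field names hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Keep only the category buckets whose average price exceeds 100.
const result = await client.search({
  index: 'sales', // hypothetical index name
  size: 0,
  aggs: {
    by_category: {
      terms: { field: 'category' }, // hypothetical keyword field
      aggs: {
        avg_price: { avg: { field: 'price' } },
        price_filter: {
          bucket_selector: {
            buckets_path: { avgPrice: 'avg_price' },
            script: 'params.avgPrice > 100'
          }
        }
      }
    }
  }
})
```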
+ * It dynamically builds fixed size (interval) buckets over the values. */ date_histogram?: AggregationsDateHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ derivative?: AggregationsDerivativeAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. + * Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */ diversified_sampler?: AggregationsDiversifiedSamplerAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ extended_stats?: AggregationsExtendedStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ frequent_item_sets?: AggregationsFrequentItemSetsAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: QueryDslQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ filters?: AggregationsFiltersAggregation + /** A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */ geo_bounds?: AggregationsGeoBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */ geo_centroid?: AggregationsGeoCentroidAggregation + /** A multi-bucket aggregation that works on `geo_point` fields. + * Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */ geo_distance?: AggregationsGeoDistanceAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell is labeled using a geohash which is of user-definable precision. */ geohash_grid?: AggregationsGeoHashGridAggregation + /** Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */ geo_line?: AggregationsGeoLineAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a map tile as used by many online map sites. */ geotile_grid?: AggregationsGeoTileGridAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. */ geohex_grid?: AggregationsGeohexGridAggregation + /** Defines a single bucket of all the documents within the search execution context. + * This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. 
*/ global?: AggregationsGlobalAggregation + /** A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. */ histogram?: AggregationsHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */ ip_range?: AggregationsIpRangeAggregation + /** A bucket aggregation that groups documents based on the network or sub-network of an IP address. */ ip_prefix?: AggregationsIpPrefixAggregation + /** A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */ inference?: AggregationsInferenceAggregation line?: AggregationsGeoLineAggregation + /** A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`. */ matrix_stats?: AggregationsMatrixStatsAggregation + /** A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */ max?: AggregationsMaxAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ max_bucket?: AggregationsMaxBucketAggregation + /** A single-value aggregation that approximates the median absolute deviation of its search results. */ median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation + /** A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */ min?: AggregationsMinAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ min_bucket?: AggregationsMinBucketAggregation + /** A field data based single bucket aggregation, that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */ missing?: AggregationsMissingAggregation moving_avg?: AggregationsMovingAverageAggregation + /** Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */ moving_percentiles?: AggregationsMovingPercentilesAggregation + /** Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. + * For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */ moving_fn?: AggregationsMovingFunctionAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */ multi_terms?: AggregationsMultiTermsAggregation + /** A special single bucket aggregation that enables aggregating nested documents. */ nested?: AggregationsNestedAggregation + /** A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */ normalize?: AggregationsNormalizeAggregation + /** A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field.
*/ parent?: AggregationsParentAggregation + /** A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. */ percentile_ranks?: AggregationsPercentileRanksAggregation + /** A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */ percentiles?: AggregationsPercentilesAggregation + /** A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. */ percentiles_bucket?: AggregationsPercentilesBucketAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation which finds "rare" terms—terms that are at the long-tail of the distribution and are not frequent. */ rare_terms?: AggregationsRareTermsAggregation + /** Calculates a rate of documents or a field in each bucket. + * Can only be used inside a `date_histogram` or `composite` aggregation. */ rate?: AggregationsRateAggregation + /** A special single bucket aggregation that enables aggregating on parent documents from nested documents. + * Should only be defined inside a `nested` aggregation. */ reverse_nested?: AggregationsReverseNestedAggregation + /** A single bucket aggregation that randomly includes documents in the aggregated results. + * Sampling provides significant speed improvement at the cost of accuracy. + * @remarks This property is not supported on Elastic Cloud Serverless. + * @experimental */ random_sampler?: AggregationsRandomSamplerAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ sampler?: AggregationsSamplerAggregation + /** A metric aggregation that uses scripts to provide a metric output. */ scripted_metric?: AggregationsScriptedMetricAggregation + /** An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ serial_diff?: AggregationsSerialDifferencingAggregation + /** Returns interesting or unusual occurrences of terms in a set. */ significant_terms?: AggregationsSignificantTermsAggregation + /** Returns interesting or unusual occurrences of free-text terms in a set. */ significant_text?: AggregationsSignificantTextAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ stats?: AggregationsStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ stats_bucket?: AggregationsStatsBucketAggregation + /** A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ string_stats?: AggregationsStringStatsAggregation + /** A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ sum?: AggregationsSumAggregation + /** A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ sum_bucket?: AggregationsSumBucketAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. 
*/ terms?: AggregationsTermsAggregation + /** The time series aggregation queries data created using a time series index. + * This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. + * @experimental */ time_series?: AggregationsTimeSeriesAggregation + /** A metric aggregation that returns the top matching documents per bucket. */ top_hits?: AggregationsTopHitsAggregation + /** A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */ t_test?: AggregationsTTestAggregation + /** A metric aggregation that selects metrics from the document with the largest or smallest sort value. */ top_metrics?: AggregationsTopMetricsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation + /** A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */ weighted_avg?: AggregationsWeightedAverageAggregation + /** A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ variable_width_histogram?: AggregationsVariableWidthHistogramAggregation } export interface AggregationsAggregationRange { + /** Start of the range (inclusive). */ from?: double | null + /** Custom key to return the range with. */ key?: string + /** End of the range (exclusive). */ to?: double | null } @@ -3158,14 +5072,24 @@ export interface AggregationsAutoDateHistogramAggregate extends AggregationsMult } export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** The target number of buckets. */ buckets?: integer + /** The field on which to run the aggregation. */ field?: Field + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string + /** The minimum rounding interval. + * This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */ minimum_interval?: AggregationsMinimumInterval + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: DateTime + /** Time zone specified as an ISO 8601 UTC offset. */ offset?: string params?: Record script?: Script | string + /** Time zone ID. */ time_zone?: TimeZone } @@ -3196,7 +5120,10 @@ export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase } export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead.
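With the container documented, a typical composition nests a metric inside a bucket aggregation via `aggs`. A sketch (index and fields hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// One bucket per author, each carrying its most recent article.
const result = await client.search({
  index: 'articles', // hypothetical index name
  size: 0,
  aggs: {
    by_author: {
      terms: { field: 'author', size: 10 }, // hypothetical keyword field
      aggs: {
        latest: { top_hits: { size: 1, sort: [{ published: 'desc' }] } }
      }
    }
  }
})
```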
*/ execution_hint?: AggregationsTDigestExecutionHint } @@ -3204,26 +5131,49 @@ export interface AggregationsBucketAggregationBase { } export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { + /** The correlation function to execute. */ function: AggregationsBucketCorrelationFunction } export interface AggregationsBucketCorrelationFunction { + /** The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. */ count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation } export interface AggregationsBucketCorrelationFunctionCountCorrelation { + /** The indicator with which to correlate the configured `bucket_path` values. */ indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator } export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + /** The total number of documents that initially created the expectations. It’s required to be greater + * than or equal to the sum of all values in the buckets_path as this is the originating superset of data + * to which the term values are correlated. */ doc_count: integer + /** An array of numbers with which to correlate the configured `bucket_path` values. + * The length of this value must always equal the number of buckets returned by the `bucket_path`. */ expectations: double[] + /** An array of fractions to use when averaging and calculating variance. This should be used if + * the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, + * must equal expectations. */ fractions?: double[] } export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + /** A list of string values indicating which K-S test alternative to calculate. The valid values + * are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used + * when calculating the K-S test. Default value is all possible alternative hypotheses. */ alternative?: string[] + /** A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. + * In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual + * document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall + * documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a + * metric to define the bucket end points. */ fractions?: double[] + /** Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. + * This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is + * `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, + * and `lower_tail`. */ sampling_method?: string } @@ -3232,21 +5182,29 @@ export interface AggregationsBucketMetricValueAggregate extends AggregationsSing } export interface AggregationsBucketPathAggregation { + /** Path to the buckets that contain one set of values to correlate. */ buckets_path?: AggregationsBucketsPath } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { + /** The script to run for this aggregation. 
*/ script?: Script | string } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { + /** The script to run for this aggregation. */ script?: Script | string } export interface AggregationsBucketSortAggregation { + /** Buckets in positions prior to `from` will be truncated. */ from?: integer + /** The policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy + /** The number of buckets to return. + * Defaults to all buckets of the parent aggregation. */ size?: integer + /** The list of fields to sort on. */ sort?: Sort } @@ -3261,30 +5219,58 @@ export interface AggregationsCardinalityAggregate extends AggregationsAggregateB } export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + /** A unique count below which counts are expected to be close to accurate. + * This allows trading memory for accuracy. */ precision_threshold?: integer rehash?: boolean + /** Mechanism by which the cardinality aggregation is run. */ execution_hint?: AggregationsCardinalityExecutionMode } export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' export interface AggregationsCategorizeTextAggregation { + /** The semi-structured text field to categorize. */ field: Field + /** The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. + * Smaller values use less memory and create fewer categories. Larger values will use more memory and + * create narrower categories. Max allowed value is 100. */ max_unique_tokens?: integer + /** The maximum number of token positions to match on before attempting to merge categories. Larger + * values will use more memory and create narrower categories. Max allowed value is 100. */ max_matched_tokens?: integer + /** The minimum percentage of tokens that must match for text to be added to the category bucket. Must + * be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory + * usage and create narrower categories. */ similarity_threshold?: integer + /** This property expects an array of regular expressions. The expressions are used to filter out matching + * sequences from the categorization field values. You can use this functionality to fine tune the categorization + * by excluding sequences from consideration when categories are defined. For example, you can exclude SQL + * statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. + * If you only want to define simple regular expression filters that are applied prior to tokenization, setting + * this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, + * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ categorization_filters?: string[] + /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. + * The syntax is very similar to that used to define the analyzer in the `_analyze` endpoint. This property + * cannot be used at the same time as categorization_filters. */ categorization_analyzer?: AggregationsCategorizeTextAnalyzer + /** The number of categorization buckets to return from each shard before merging all the results. */ shard_size?: integer + /** The number of buckets to return.
*/ size?: integer + /** The minimum number of documents in a bucket to be returned to the results. */ min_doc_count?: integer + /** The minimum number of documents in a bucket to be returned from the shard before merging. */ shard_min_doc_count?: integer } export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer export interface AggregationsChiSquareHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives: boolean } @@ -3294,6 +5280,7 @@ export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. */ type?: RelationName } @@ -3304,24 +5291,34 @@ export interface AggregationsCompositeAggregate extends AggregationsMultiBucketA export type AggregationsCompositeAggregateKey = Record export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + /** When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ after?: AggregationsCompositeAggregateKey + /** The number of composite buckets that should be returned. */ size?: integer + /** The value sources used to build composite buckets. + * Keys are returned in the order of the `sources` definition. */ sources?: Record[] } export interface AggregationsCompositeAggregationBase { + /** Either `field` or `script` must be present */ field?: Field missing_bucket?: boolean missing_order?: AggregationsMissingOrder + /** Either `field` or `script` must be present */ script?: Script | string value_type?: AggregationsValueType order?: SortOrder } export interface AggregationsCompositeAggregationSource { + /** A terms aggregation. */ terms?: AggregationsCompositeTermsAggregation + /** A histogram aggregation. */ histogram?: AggregationsCompositeHistogramAggregation + /** A date histogram aggregation. */ date_histogram?: AggregationsCompositeDateHistogramAggregation + /** A geotile grid aggregation. */ geotile_grid?: AggregationsCompositeGeoTileGridAggregation } @@ -3333,7 +5330,9 @@ export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase { format?: string + /** Either `calendar_interval` or `fixed_interval` must be present */ calendar_interval?: DurationLarge + /** Either `calendar_interval` or `fixed_interval` must be present */ fixed_interval?: DurationLarge offset?: Duration time_zone?: TimeZone @@ -3372,20 +5371,37 @@ export interface AggregationsDateHistogramAggregate extends AggregationsMultiBuc } export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** Calendar-aware interval. + * Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ calendar_interval?: AggregationsCalendarInterval + /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds + /** Limits the histogram to specified bounds. 
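The `after`/`after_key` handshake described above drives composite pagination. A sketch (index and source field hypothetical; the response is handled loosely typed for brevity):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Page through all composite buckets; the loop ends once a response
// no longer returns an `after_key`.
let after: Record<string, any> | undefined
do {
  const result: any = await client.search({
    index: 'logs', // hypothetical index name
    size: 0,
    aggs: {
      pages: {
        composite: {
          size: 1000,
          sources: [{ host: { terms: { field: 'host.name' } } }],
          ...(after != null && { after })
        }
      }
    }
  })
  for (const bucket of result.aggregations?.pages?.buckets ?? []) {
    // process bucket.key and bucket.doc_count here
  }
  after = result.aggregations?.pages?.after_key
} while (after != null)
```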
*/ hard_bounds?: AggregationsExtendedBounds + /** The date field whose values are used to build a histogram. */ field?: Field + /** A fixed interval: a fixed number of SI units that never deviates, regardless of where it falls on the calendar. */ fixed_interval?: Duration + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string interval?: Duration + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, all buckets between the first bucket that matches documents and the last one are returned. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: DateTime + /** Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ offset?: Duration + /** The sort order of the returned buckets. */ order?: AggregationsAggregateOrder params?: Record script?: Script | string + /** Time zone used for bucketing and rounding. + * Defaults to Coordinated Universal Time (UTC). */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean } @@ -3400,17 +5416,27 @@ export interface AggregationsDateRangeAggregate extends AggregationsRangeAggrega } export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { + /** The date field whose values are used to build ranges. */ field?: Field + /** The date format used to format `from` and `to` in the response. */ format?: string + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing + /** Array of date ranges. */ ranges?: AggregationsDateRangeExpression[] + /** Time zone used to convert dates from another time zone to UTC. */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean } export interface AggregationsDateRangeExpression { + /** Start of the range (inclusive). */ from?: AggregationsFieldDateMath + /** Custom key to return the range with. */ key?: string + /** End of the range (exclusive). */ to?: AggregationsFieldDateMath } @@ -3423,10 +5449,14 @@ export interface AggregationsDerivativeAggregation extends AggregationsPipelineA } export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { + /** The type of value used for de-duplication. */ execution_hint?: AggregationsSamplerAggregationExecutionHint + /** Limits how many documents are permitted per choice of de-duplicating value. */ max_docs_per_value?: integer script?: Script | string + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer + /** The field used to provide values used for de-duplication. */ field?: Field } @@ -3450,7 +5480,9 @@ export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMo } export interface AggregationsExtendedBounds { + /** Maximum value for the bound. */ max?: T + /** Minimum value for the bound.
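A sketch of a calendar-aware date histogram using several of these knobs (index and date field hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Monthly buckets for 2024; empty months are kept because min_doc_count
// is 0 and the bounds are extended across the whole year.
const result = await client.search({
  index: 'sales', // hypothetical index name
  size: 0,
  aggs: {
    per_month: {
      date_histogram: {
        field: 'order_date', // hypothetical date field
        calendar_interval: 'month',
        format: 'yyyy-MM',
        min_doc_count: 0,
        time_zone: 'UTC',
        extended_bounds: { min: '2024-01', max: '2024-12' }
      }
    }
  }
})
```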
*/ min?: T } @@ -3472,6 +5504,7 @@ export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAgg } export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -3479,6 +5512,7 @@ export interface AggregationsExtendedStatsBucketAggregate extends AggregationsEx } export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -3493,9 +5527,14 @@ export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAgg } export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. */ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } @@ -3517,10 +5556,15 @@ export interface AggregationsFrequentItemSetsAggregate extends AggregationsMulti } export interface AggregationsFrequentItemSetsAggregation { + /** Fields to analyze. */ fields: AggregationsFrequentItemSetsField[] + /** The minimum size of one item set. */ minimum_set_size?: integer + /** The minimum support of one item set. */ minimum_support?: double + /** The number of top item sets to return. */ size?: integer + /** Query that filters documents from analysis. */ filter?: QueryDslQueryContainer } @@ -3533,7 +5577,11 @@ export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBuc export interface AggregationsFrequentItemSetsField { field: Field + /** Values to exclude. + * Can be regular expression strings or arrays of strings of exact terms. */ exclude?: AggregationsTermsExclude + /** Values to include. + * Can be regular expression strings or arrays of strings of exact terms. */ include?: AggregationsTermsInclude } @@ -3544,6 +5592,7 @@ export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBas } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { + /** Specifies whether the bounding box should be allowed to overlap the international date line. */ wrap_longitude?: boolean } @@ -3561,10 +5610,15 @@ export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggre } export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { + /** The distance calculation type. */ distance_type?: GeoDistanceType + /** A field of type `geo_point` used to evaluate the distance. */ field?: Field + /** The origin used to evaluate the distance. */ origin?: GeoLocation + /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[] + /** The distance unit. */ unit?: DistanceUnit } @@ -3572,10 +5626,17 @@ export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucke } export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { + /** The bounding box to filter the points in each bucket. */ bounds?: GeoBounds + /** Field containing indexed `geo_point` or `geo_shape` values. 
+ * If the field contains an array, `geohash_grid` aggregates all array values. */ field?: Field + /** The string length of the geohashes used to define cells/buckets in the results. */ precision?: GeoHashPrecision + /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer + /** The maximum number of geohash buckets to return. */ size?: integer } @@ -3601,18 +5662,27 @@ export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase } export interface AggregationsGeoLineAggregation { + /** The name of the geo_point field. */ point: AggregationsGeoLinePoint + /** The name of the numeric field to use as the sort key for ordering the points. + * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in an error. */ sort: AggregationsGeoLineSort + /** When `true`, returns an additional array of the sort values in the feature properties. */ include_sort?: boolean + /** The order in which the line is sorted (ascending or descending). */ sort_order?: SortOrder + /** The maximum length of the line represented in the aggregation. + * Valid sizes are between 1 and 10000. */ size?: integer } export interface AggregationsGeoLinePoint { + /** The name of the geo_point field. */ field: Field } export interface AggregationsGeoLineSort { + /** The name of the numeric field to use as the sort key for ordering the points. */ field: Field } @@ -3620,10 +5690,18 @@ export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucke } export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geotile_grid` aggregates all array values. */ field?: Field + /** Integer zoom of the key used to define cells/buckets in the results. + * Values outside of the range [0,29] will be rejected. */ precision?: GeoTilePrecision + /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer + /** The maximum number of buckets to return. */ size?: integer + /** A bounding box to filter the geo-points or geo-shapes in each bucket. */ bounds?: GeoBounds } @@ -3634,10 +5712,17 @@ export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys & { [property: string]: AggregationsAggregate | GeoTile | long } export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geohex_grid` aggregates all array values. */ field: Field + /** Integer zoom of the key used to define cells or buckets + * in the results. Values should be between 0 and 15. */ precision?: integer + /** Bounding box used to filter the geo-points in each bucket. */ bounds?: GeoBounds + /** Maximum number of buckets to return. */ size?: integer + /** Number of buckets returned from each shard.
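The grid options above combine like this for a `geotile_grid` over a hypothetical `location` field of type `geo_point`, again assuming the `client` from the first sketch:

```ts
const tiles = await client.search({
  index: 'my-places',
  size: 0,
  aggs: {
    tiles: {
      geotile_grid: {
        field: 'location',  // a geo_point field
        precision: 8,       // zoom level, must be within [0, 29]
        size: 10,
        bounds: {
          top_left: { lat: 52.4, lon: 4.8 },
          bottom_right: { lat: 52.3, lon: 5.0 }
        }
      }
    }
  }
})
```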
*/ shard_size?: integer } @@ -3650,10 +5735,12 @@ export interface AggregationsGlobalAggregation extends AggregationsBucketAggrega } export interface AggregationsGoogleNormalizedDistanceHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean } export interface AggregationsHdrMethod { + /** Specifies the resolution of values for the histogram in number of significant digits. */ number_of_significant_value_digits?: integer } @@ -3667,16 +5754,31 @@ export interface AggregationsHistogramAggregate extends AggregationsMultiBucketA } export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { + /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds + /** Limits the range of buckets in the histogram. + * It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */ hard_bounds?: AggregationsExtendedBounds + /** The name of the field to aggregate on. */ field?: Field + /** The interval for the buckets. + * Must be a positive decimal. */ interval?: double + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, the response will fill gaps in the histogram with empty buckets. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: double + /** By default, the bucket keys start with 0 and then continue in evenly spaced steps of `interval`. + * The bucket boundaries can be shifted by using the `offset` option. */ offset?: double + /** The sort order of the returned buckets. + * By default, the returned buckets are sorted by their key ascending. */ order?: AggregationsAggregateOrder script?: Script | string format?: string + /** If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */ keyed?: boolean } @@ -3723,7 +5825,9 @@ export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys & { [property: string]: any } export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { + /** The ID or alias for the trained model. */ model_id: Name + /** Contains the inference type and its options. */ inference_config?: AggregationsInferenceConfigContainer } @@ -3733,7 +5837,9 @@ export interface AggregationsInferenceClassImportance { } export interface AggregationsInferenceConfigContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions } @@ -3753,11 +5859,18 @@ export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAg } export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { + /** The IP address field to aggregate on. The field mapping type must be `ip`. */ field: Field + /** Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. + * For IPv6 addresses the accepted range is [0, 128]. */ prefix_length: integer + /** Defines whether the prefix applies to IPv6 addresses. */ is_ipv6?: boolean + /** Defines whether the prefix length is appended to IP address keys in the response.
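A minimal sketch of the numeric `histogram` options above, with a hypothetical `price` field and the same assumed `client`:

```ts
const prices = await client.search({
  index: 'my-products',
  size: 0,
  aggs: {
    price_buckets: {
      histogram: {
        field: 'price',
        interval: 50,
        offset: 10,        // buckets start at 10, 60, 110, ...
        min_doc_count: 1,  // omit empty buckets
        missing: 0         // treat documents without a price as 0
      }
    }
  }
})
```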
*/ append_prefix_length?: boolean + /** Defines whether buckets are returned as a hash rather than an array in the response. */ keyed?: boolean + /** Minimum number of documents in a bucket for it to be included in the response. */ min_doc_count?: long } @@ -3774,13 +5887,18 @@ export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAgg } export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase { + /** The IP field whose values are used to build ranges. */ field?: Field + /** Array of IP ranges. */ ranges?: AggregationsIpRangeAggregationRange[] } export interface AggregationsIpRangeAggregationRange { + /** Start of the range. */ from?: string | null + /** IP range defined as a CIDR mask. */ mask?: string + /** End of the range. */ to?: string | null } @@ -3820,7 +5938,10 @@ export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string } export interface AggregationsMatrixAggregation { + /** An array of fields for computing the statistics. */ fields?: Fields + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: Record } @@ -3830,6 +5951,7 @@ export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateB } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { + /** Array value the aggregation will use for array or multi-valued fields. */ mode?: SortMode } @@ -3857,12 +5979,18 @@ export interface AggregationsMedianAbsoluteDeviationAggregate extends Aggregatio } export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to `high_accuracy` instead. */ execution_hint?: AggregationsTDigestExecutionHint } export interface AggregationsMetricAggregationBase { + /** The field on which to run the aggregation. */ field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing script?: Script | string } @@ -3886,6 +6014,7 @@ export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { + /** The name of the field. */ field?: Field missing?: AggregationsMissing } @@ -3901,13 +6030,20 @@ export interface AggregationsMovingAverageAggregationBase extends AggregationsPi } export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { + /** The script that should be executed on each window of data. */ script?: string + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1 moves the starting window position by 1 to the right. */ shift?: integer + /** The size of the window to "slide" across the histogram.
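`moving_fn` is a pipeline aggregation, so the `window`, `shift`, and `script` options above ride alongside a `buckets_path` inside a parent histogram. A sketch with hypothetical index and field names, reusing the assumed `client`:

```ts
const smoothed = await client.search({
  index: 'my-metrics',
  size: 0,
  aggs: {
    per_day: {
      date_histogram: { field: '@timestamp', calendar_interval: 'day' },
      aggs: {
        total: { sum: { field: 'value' } },
        smoothed_total: {
          moving_fn: {
            buckets_path: 'total',
            window: 7,  // a 7-bucket window sliding across the histogram
            script: 'MovingFunctions.unweightedAvg(values)'
          }
        }
      }
    }
  }
})
```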
*/ window?: integer } export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { + /** The size of the window to "slide" across the histogram. */ window?: integer + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1 moves the starting window position by 1 to the right. */ shift?: integer keyed?: boolean } @@ -3921,7 +6057,10 @@ export interface AggregationsMultiBucketBase { } export interface AggregationsMultiTermLookup { + /** A field from which to retrieve terms. */ field: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing } @@ -3929,13 +6068,23 @@ export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggreg } export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { + /** Specifies the strategy for data collection. */ collect_mode?: AggregationsTermsAggregationCollectMode + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. */ order?: AggregationsAggregateOrder + /** The minimum number of documents in a bucket for it to be returned. */ min_doc_count?: long + /** The minimum number of documents in a bucket on each shard for it to be returned. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** Calculates the doc count error on a per-term basis. */ show_term_doc_count_error?: boolean + /** The number of term buckets that should be returned out of the overall terms list. */ size?: integer + /** The field from which to generate sets of terms. */ terms: AggregationsMultiTermLookup[] } @@ -3948,7 +6097,9 @@ export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys & { [property: string]: AggregationsAggregate | FieldValue[] | string | long } export interface AggregationsMutualInformationHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives?: boolean } @@ -3958,10 +6109,12 @@ export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { + /** The path to the field of type `nested`. */ path?: Field } export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { + /** The specific method to apply. */ method?: AggregationsNormalizeMethod } @@ -3973,6 +6126,7 @@ export type AggregationsParentAggregate = AggregationsParentAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected.
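The `multi_terms` options above compose term tuples from several `MultiTermLookup` entries. A sketch with hypothetical fields, using the assumed `client`:

```ts
const pairs = await client.search({
  index: 'my-sales',
  size: 0,
  aggs: {
    genres_and_products: {
      multi_terms: {
        terms: [
          { field: 'genre' },
          { field: 'product', missing: 'Product Z' }  // bucket docs lacking a product
        ],
        size: 10,
        order: { _count: 'desc' }
      }
    }
  }
})
```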
*/ type?: RelationName } @@ -3980,9 +6134,14 @@ export interface AggregationsPercentageScoreHeuristic { } export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ keyed?: boolean + /** An array of values for which to calculate the percentile ranks. */ values?: double[] | null + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */ hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */ tdigest?: AggregationsTDigest } @@ -3993,9 +6152,14 @@ export interface AggregationsPercentilesAggregateBase extends AggregationsAggreg } export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ keyed?: boolean + /** The percentiles to calculate. */ percents?: double[] + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */ tdigest?: AggregationsTDigest } @@ -4003,17 +6167,28 @@ export interface AggregationsPercentilesBucketAggregate extends AggregationsPerc } export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { + /** The list of percentiles to calculate. */ percents?: double[] } export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { + /** `DecimalFormat` pattern for the output value. + * If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */ format?: string + /** Policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy } export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { + /** The probability that a document will be included in the aggregated data. + * Must be greater than 0, less than 0.5, or exactly 1. + * The lower the probability, the fewer documents are matched. */ probability: double + /** The seed to generate the random sampling of documents. + * When a seed is provided, the random subset of documents is the same between calls. */ seed?: integer + /** When combined with `seed`, setting `shard_seed` ensures 100% consistent sampling over shards where data is exactly the same. + * @remarks This property is not supported on Elastic Cloud Serverless. */ shard_seed?: integer } @@ -4021,10 +6196,15 @@ export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggre } export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { + /** The field whose values are used to build ranges. */ field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: integer + /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[] script?: Script | string + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array.
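A sketch of the `percentiles` options above, trading accuracy against memory through the `tdigest` settings; index and field names are hypothetical, `client` is assumed as before:

```ts
const latencies = await client.search({
  index: 'my-latency',
  size: 0,
  aggs: {
    load_time_percentiles: {
      percentiles: {
        field: 'load_time',
        percents: [50, 95, 99],
        keyed: false,                  // return an array rather than a hash
        tdigest: { compression: 200 }  // larger values: more accuracy, more memory
      }
    }
  }
})
```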
*/ keyed?: boolean format?: string } @@ -4034,17 +6214,26 @@ export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase to?: double from_as_string?: string to_as_string?: string + /** The bucket key. Present if the aggregation is _not_ keyed. */ key?: string } export type AggregationsRangeBucket = AggregationsRangeBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { + /** Terms that should be excluded from the aggregation. */ exclude?: AggregationsTermsExclude + /** The field from which to return rare terms. */ field?: Field + /** Terms that should be included in the aggregation. */ include?: AggregationsTermsInclude + /** The maximum number of documents a term should appear in. */ max_doc_count?: long + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing + /** The precision of the internal CuckooFilters. + * Smaller precision leads to better approximation, but higher memory usage. */ precision?: double value_type?: string } @@ -4055,7 +6244,10 @@ export interface AggregationsRateAggregate extends AggregationsAggregateBase { } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { + /** The interval used to calculate the rate. + * By default, the interval of the `date_histogram` is used. */ unit?: AggregationsCalendarInterval + /** How the rate is calculated. */ mode?: AggregationsRateMode } @@ -4067,6 +6259,8 @@ export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggreg & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { + /** Defines the nested object field that should be joined back to. + * The default is empty, which means that it joins back to the root/main document level. */ path?: Field } @@ -4076,6 +6270,7 @@ export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer } @@ -4090,14 +6285,26 @@ export interface AggregationsScriptedMetricAggregate extends AggregationsAggrega } export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { + /** Runs once on each shard after document collection is complete. + * Allows the aggregation to consolidate the state returned from each shard. */ combine_script?: Script | string + /** Runs prior to any collection of documents. + * Allows the aggregation to set up any initial state. */ init_script?: Script | string + /** Runs once per document collected. + * If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */ map_script?: Script | string + /** A global object with script parameters for `init`, `map` and `combine` scripts. + * It is shared between the scripts. */ params?: Record + /** Runs once on the coordinating node after all shards have returned their results. + * The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard.
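The four scripts above run in the order `init`, `map`, `combine`, `reduce`. A sketch that sums sales minus costs, with hypothetical `type` and `amount` fields and the assumed `client`:

```ts
const profit = await client.search({
  index: 'my-transactions',
  size: 0,
  aggs: {
    profit: {
      scripted_metric: {
        init_script: 'state.transactions = []',
        map_script:
          "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)",
        combine_script:
          'double p = 0; for (t in state.transactions) { p += t } return p',
        reduce_script:
          'double p = 0; for (s in states) { p += s } return p'
      }
    }
  }
})
```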
*/ reduce_script?: Script | string } export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { + /** The historical bucket to subtract from the current value. + * Must be a positive, non-zero integer. */ lag?: integer } @@ -4126,20 +6333,37 @@ export interface AggregationsSignificantTermsAggregateBase extends } export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic + /** Terms to exclude. */ exclude?: AggregationsTermsExclude + /** Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant terms. */ field?: Field + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Terms to include. */ include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ jlh?: EmptyObject + /** Only return terms that are found in more than `min_doc_count` hits. */ min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** Can be used to control the volumes of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ size?: integer } @@ -4149,22 +6373,41 @@ export interface AggregationsSignificantTermsBucketBase extends AggregationsMult } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic + /** Values to exclude. */ exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant text. 
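A sketch of `significant_terms` comparing a query-defined foreground against the index background; the query, fields, and optional `background_filter` values are hypothetical, `client` is assumed as before:

```ts
const significant = await client.search({
  index: 'my-reports',
  size: 0,
  query: { terms: { force: ['British Transport Police'] } },
  aggs: {
    significant_crime_types: {
      significant_terms: {
        field: 'crime_type',
        min_doc_count: 10,
        // Optionally compare against a narrower background than the whole index.
        background_filter: { term: { city: 'London' } }
      }
    }
  }
})
```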
*/ field?: Field + /** Whether to filter out duplicate text to deal with noisy data. */ filter_duplicate_text?: boolean + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Values to include. */ include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ jlh?: EmptyObject + /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic + /** Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ size?: integer + /** Overrides the JSON `_source` fields from which text will be analyzed. */ source_fields?: Fields } @@ -4181,6 +6424,8 @@ export interface AggregationsSingleBucketAggregateBase extends AggregationsAggre } export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase { + /** The metric value. A missing value generally means that there was no data to aggregate, + * unless specified otherwise. */ value: double | null value_as_string?: string } @@ -4246,6 +6491,7 @@ export interface AggregationsStringStatsAggregate extends AggregationsAggregateB } export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase { + /** Shows the probability distribution for all characters. */ show_distribution?: boolean } @@ -4268,7 +6514,10 @@ export interface AggregationsSumBucketAggregation extends AggregationsPipelineAg } export interface AggregationsTDigest { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: integer + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to `high_accuracy` instead. */ execution_hint?: AggregationsTDigestExecutionHint } @@ -4286,8 +6535,11 @@ export interface AggregationsTTestAggregate extends AggregationsAggregateBase { } export interface AggregationsTTestAggregation { + /** Test population A. */ a?: AggregationsTestPopulation + /** Test population B. */ b?: AggregationsTestPopulation + /** The type of test.
*/ type?: AggregationsTTestType } @@ -4299,21 +6551,40 @@ export interface AggregationsTermsAggregateBase extends Aggre } export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { + /** Determines how child aggregations should be calculated: breadth-first or depth-first. */ collect_mode?: AggregationsTermsAggregationCollectMode + /** Values to exclude. + * Accepts regular expressions and partitions. */ exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return terms. */ field?: Field + /** Values to include. + * Accepts regular expressions and partitions. */ include?: AggregationsTermsInclude + /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing missing_order?: AggregationsMissingOrder missing_bucket?: boolean + /** Coerces unmapped fields into the specified type. */ value_type?: string + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. */ order?: AggregationsAggregateOrder script?: Script | string + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */ show_term_doc_count_error?: boolean + /** The number of buckets returned out of the overall terms list. */ size?: integer format?: string } @@ -4331,13 +6602,17 @@ export type AggregationsTermsExclude = string | string[] export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition export interface AggregationsTermsPartition { + /** The number of partitions. */ num_partitions: long + /** The partition number for this request. */ partition: long } export interface AggregationsTestPopulation { + /** The field to aggregate. */ field: Field script?: Script | string + /** A filter used to define a set of records to run unpaired t-test on. */ filter?: QueryDslQueryContainer } @@ -4345,7 +6620,9 @@ export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucket } export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase { + /** The maximum number of results to return. */ size?: integer + /** Set to `true` to associate a unique string key with each bucket and return the buckets as a hash rather than an array. */ keyed?: boolean } @@ -4360,18 +6637,33 @@ export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { + /** Fields for which to return doc values. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** If `true`, returns detailed information about score computation as part of a hit.
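The partition form of `include` above pages through a high-cardinality field. A sketch requesting partition 0 of 20, with hypothetical names and the assumed `client`:

```ts
const partitioned = await client.search({
  index: 'my-accounts',
  size: 0,
  aggs: {
    accounts: {
      terms: {
        field: 'account_id',
        include: { num_partitions: 20, partition: 0 },
        size: 100,
        order: { _key: 'asc' }
      }
    }
  }
})
```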
*/ explain?: boolean + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] + /** Starting document offset. */ from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */ highlight?: SearchHighlight + /** Returns the result of one or more script evaluations for each hit. */ script_fields?: Record + /** The maximum number of top matching hits to return per bucket. */ size?: integer + /** Sort order of the top matching hits. + * By default, the hits are sorted by the score of the main query. */ sort?: Sort + /** Selects the fields of the source that are returned. */ _source?: SearchSourceConfig + /** Returns values for the specified stored fields (fields that use the `store` mapping option). */ stored_fields?: Fields + /** If `true`, calculates and returns document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If `true`, returns document version as part of a hit. */ version?: boolean + /** If `true`, returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean } @@ -4385,12 +6677,16 @@ export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBa } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { + /** The fields of the top document to return. */ metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] + /** The number of top documents from which to return metrics. */ size?: integer + /** The sort order of the documents. */ sort?: Sort } export interface AggregationsTopMetricsValue { + /** A field to return as a metric. */ field: Field } @@ -4420,9 +6716,15 @@ export interface AggregationsVariableWidthHistogramAggregate extends Aggregation } export interface AggregationsVariableWidthHistogramAggregation { + /** The name of the field. */ field?: Field + /** The target number of buckets. */ buckets?: integer + /** The number of buckets that the coordinating node will request from each shard. + * Defaults to `buckets * 50`. */ shard_size?: integer + /** Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. + * Defaults to `min(10 * shard_size, 50000)`. */ initial_buffer?: integer script?: Script | string } @@ -4439,14 +6741,19 @@ export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidth & { [property: string]: AggregationsAggregate | double | string | long } export interface AggregationsWeightedAverageAggregation { + /** A numeric response formatter. */ format?: string + /** Configuration for the field that provides the values. */ value?: AggregationsWeightedAverageValue value_type?: AggregationsValueType + /** Configuration for the field or script that provides the weights. */ weight?: AggregationsWeightedAverageValue } export interface AggregationsWeightedAverageValue { + /** The field from which to extract the values or weights. */ field?: Field + /** A value or weight to use if the field is missing. */ missing?: double script?: Script | string } @@ -4484,6 +6791,7 @@ export interface AnalysisArmenianAnalyzer { export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' + /** If `true`, emit both original tokens and folded tokens. Defaults to `false`. 
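A sketch of the `top_metrics` options documented above, returning metric values from the most recently sorted document; sensor index and field names are hypothetical, `client` is assumed as before:

```ts
const latest = await client.search({
  index: 'my-sensors',
  size: 0,
  aggs: {
    latest_reading: {
      top_metrics: {
        metrics: [{ field: 'temperature' }, { field: 'humidity' }],
        sort: { '@timestamp': 'desc' },
        size: 1
      }
    }
  }
})
```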
*/ preserve_original?: SpecUtilsStringified } @@ -4559,7 +6867,9 @@ export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'ka export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase { type: 'cjk_bigram' + /** Array of character scripts for which to disable bigrams. */ ignored_scripts?: AnalysisCjkBigramIgnoredScript[] + /** If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`. */ output_unigrams?: boolean } @@ -4578,24 +6888,45 @@ export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' + /** A list of tokens. The filter generates bigrams for these tokens. + * Either this or the `common_words_path` parameter is required. */ common_words?: string[] + /** Path to a file containing a list of tokens. The filter generates bigrams for these tokens. + * This path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this or the `common_words` parameter is required. */ common_words_path?: string + /** If `true`, matching for common words is case-insensitive. Defaults to `false`. */ ignore_case?: boolean + /** If `true`, the filter excludes the following tokens from the output: + * - Unigrams for common words + * - Unigrams for terms followed by common words + * Defaults to `false`. We recommend enabling this parameter for search analyzers. */ query_mode?: boolean } export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { + /** Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`. */ max_subword_size?: integer + /** Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`. */ min_subword_size?: integer + /** Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`. */ min_word_size?: integer + /** If `true`, only include the longest matching subword. Defaults to `false`. */ only_longest_match?: boolean + /** A list of subwords to look for in the token stream. If found, the subword is included in the token output. + * Either this parameter or `word_list_path` must be specified. */ word_list?: string[] + /** Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this parameter or `word_list` must be specified. */ word_list_path?: string } export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition' + /** Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided. */ filter: string[] + /** Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token.
*/ script: Script | string } @@ -4639,7 +6970,9 @@ export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { type: 'delimited_payload' + /** Character used to separate tokens from payloads. Defaults to `|`. */ delimiter?: string + /** Data type for the stored payload. */ encoding?: AnalysisDelimitedPayloadEncoding } @@ -4662,9 +6995,13 @@ export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { type: 'edge_ngram' + /** Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`. */ max_gram?: integer + /** Minimum character length of a gram. Defaults to `1`. */ min_gram?: integer + /** Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`. */ side?: AnalysisEdgeNGramSide + /** Emits original token when set to `true`. Defaults to `false`. */ preserve_original?: SpecUtilsStringified } @@ -4678,8 +7015,16 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { type: 'elision' + /** List of elisions to remove. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles_path` must be specified. */ articles?: string[] + /** Path to a file that contains a list of elisions to remove. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles` must be specified. */ articles_path?: string + /** If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`. */ articles_case?: SpecUtilsStringified } @@ -4708,7 +7053,9 @@ export interface AnalysisFingerprintAnalyzer { export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { type: 'fingerprint' + /** Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output. */ max_output_size?: integer + /** Character to use to concatenate the token stream input. Defaults to a space. */ separator?: string } @@ -4787,18 +7134,31 @@ export interface AnalysisHungarianAnalyzer { export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' + /** If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`. */ dedup?: boolean + /** One or more `.dic` files (e.g., `en_US.dic`, `my_custom.dic`) to use for the Hunspell dictionary. + * By default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter. */ dictionary?: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. */ locale: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.
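Token filters like the `edge_ngram` filter above are wired into index settings at creation time. A sketch of an autocomplete-style analyzer using the assumed `client`; all index, filter, and analyzer names are hypothetical:

```ts
await client.indices.create({
  index: 'my-autocomplete',
  settings: {
    analysis: {
      filter: {
        autocomplete_filter: { type: 'edge_ngram', min_gram: 1, max_gram: 20 }
      },
      analyzer: {
        autocomplete: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['lowercase', 'autocomplete_filter']
        }
      }
    }
  },
  mappings: {
    properties: {
      // Index with edge n-grams, but search with the plain standard analyzer.
      title: { type: 'text', analyzer: 'autocomplete', search_analyzer: 'standard' }
    }
  }
})
```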
+ * @alias locale */ lang: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. + * @alias locale */ language: string + /** If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`. */ longest_only?: boolean } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { type: 'hyphenation_decompounder' + /** Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file. + * This path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported. */ hyphenation_patterns_path: string + /** If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`. */ no_sub_matches?: boolean + /** If `true`, do not allow overlapping tokens. Defaults to `false`. */ no_overlapping_matches?: boolean } @@ -4904,14 +7264,22 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude' export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { type: 'keep_types' + /** Indicates whether to keep or remove the specified token types. */ mode?: AnalysisKeepTypesMode + /** List of token types to keep or remove. */ types: string[] } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { type: 'keep' + /** List of words to keep. Only tokens that match words in this list are included in the output. + * Either this parameter or `keep_words_path` must be specified. */ keep_words?: string[] + /** If `true`, lowercase all keep words. Defaults to `false`. */ keep_words_case?: boolean + /** Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. + * Either this parameter or `keep_words` must be specified. */ keep_words_path?: string } @@ -4922,9 +7290,17 @@ export interface AnalysisKeywordAnalyzer { } export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { type: 'keyword_marker' + /** If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`. */ ignore_case?: boolean + /** Array of keywords. Tokens that match these keywords are not stemmed. + * This parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ keywords?: string | string[] + /** Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. + * This parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ keywords_path?: string + /** Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed. + * This parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_path`.
*/ keywords_pattern?: string } @@ -4986,7 +7362,9 @@ export interface AnalysisLatvianAnalyzer { export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' + /** Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. */ max?: integer + /** Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`. */ min?: integer } @@ -4996,7 +7374,9 @@ export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { type: 'limit' + /** If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`. */ consume_all_tokens?: boolean + /** Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. Defaults to `1`. */ max_token_count?: SpecUtilsStringified } @@ -5013,6 +7393,7 @@ export interface AnalysisLowercaseNormalizer { export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase' + /** Language-specific lowercase token filter to use. */ language?: AnalysisLowercaseTokenFilterLanguages } @@ -5030,22 +7411,32 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase { type: 'min_hash' + /** Number of buckets to which hashes are assigned. Defaults to `512`. */ bucket_count?: integer + /** Number of ways to hash each token in the stream. Defaults to `1`. */ hash_count?: integer + /** Number of hashes to keep from each bucket. Defaults to `1`. + * Hashes are retained by ascending size, starting with the bucket’s smallest hash first. */ hash_set_size?: integer + /** If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`. */ with_rotation?: boolean } export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { type: 'multiplexer' + /** A list of token filters to apply to incoming tokens. */ filters: string[] + /** If `true` (the default) then emit the original token in addition to the filtered tokens. */ preserve_original?: SpecUtilsStringified } export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { type: 'ngram' + /** Maximum length of characters in a gram. Defaults to `2`. */ max_gram?: integer + /** Minimum length of characters in a gram. Defaults to `1`. */ min_gram?: integer + /** Emits original token when set to `true`. Defaults to `false`. */ preserve_original?: SpecUtilsStringified } @@ -5069,6 +7460,7 @@ export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { type: 'nori_part_of_speech' + /** An array of part-of-speech tags that should be removed. */ stoptags?: string[] } @@ -5109,7 +7501,9 @@ export interface AnalysisPatternAnalyzer { export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_capture' + /** A list of regular expressions to match. */ patterns: string[] + /** If set to `true` (the default) it will emit the original token. 
*/ preserve_original?: SpecUtilsStringified } @@ -5122,9 +7516,12 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_replace' + /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ all?: boolean flags?: string + /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ pattern: string + /** Replacement substring. Defaults to an empty substring (`""`). */ replacement?: string } @@ -5180,6 +7577,7 @@ export interface AnalysisPortugueseAnalyzer { export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' + /** Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output. */ script: Script | string } @@ -5230,11 +7628,17 @@ export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFi export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' + /** String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`). */ filler_token?: string + /** Maximum number of tokens to concatenate when creating shingles. Defaults to `2`. */ max_shingle_size?: SpecUtilsStringified + /** Minimum number of tokens to concatenate when creating shingles. Defaults to `2`. */ min_shingle_size?: SpecUtilsStringified + /** If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`. */ output_unigrams?: boolean + /** If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`. */ output_unigrams_if_no_shingles?: boolean + /** Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`" "`). */ token_separator?: string } @@ -5264,6 +7668,7 @@ export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catal export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { type: 'snowball' + /** Controls the language used by the stemmer. */ language?: AnalysisSnowballLanguage } @@ -5298,13 +7703,16 @@ export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer_override' + /** A list of mapping rules to use. */ rules?: string[] + /** A path (either relative to `config` location, or absolute) to a list of mappings. */ rules_path?: string } export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer' language?: string + /** @alias language */ name?: string } @@ -5317,9 +7725,14 @@ export interface AnalysisStopAnalyzer { export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { type: 'stop' + /** If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. Defaults to `false`.
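A sketch of the `shingle` filter options above inside a custom analyzer; the index, filter, and analyzer names are hypothetical, `client` is assumed as before:

```ts
await client.indices.create({
  index: 'my-shingles',
  settings: {
    analysis: {
      filter: {
        my_shingles: {
          type: 'shingle',
          min_shingle_size: 2,
          max_shingle_size: 3,
          output_unigrams: true  // keep the original single tokens too
        }
      },
      analyzer: {
        shingle_analyzer: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['lowercase', 'my_shingles']
        }
      }
    }
  }
})
```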
*/ ignore_case?: boolean + /** If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`. */ remove_trailing?: boolean + /** Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. */ stopwords?: AnalysisStopWords + /** Path to a file that contains a list of stop words to remove. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break. */ stopwords_path?: string } @@ -5345,13 +7758,21 @@ export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBa } export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase { + /** Expands definitions for equivalent synonym rules. Defaults to `true`. */ expand?: boolean + /** Sets the synonym rules format. */ format?: AnalysisSynonymFormat + /** If `true`, ignores errors while parsing the synonym rules. Note that only synonym rules that cannot be parsed are ignored. Defaults to the value of the `updateable` setting. */ lenient?: boolean + /** Used to define inline synonyms. */ synonyms?: string[] + /** Used to provide a synonym file. This path must be absolute or relative to the `config` location. */ synonyms_path?: string + /** Provide a synonym set created via Synonyms Management APIs. */ synonyms_set?: string + /** Controls the tokenizers that will be used to tokenize the synonym; this parameter exists for backwards compatibility with indices created before 6.0. */ tokenizer?: string + /** If `true`, allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`. */ updateable?: boolean } @@ -5389,6 +7810,7 @@ export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { type: 'truncate' + /** Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`. */ length?: integer } @@ -5406,6 +7828,7 @@ export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { type: 'unique' + /** If `true`, only remove duplicate tokens in the same position. Defaults to `false`. */ only_on_same_position?: boolean } @@ -5425,7 +7848,9 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase { type: 'word_delimiter_graph' + /** If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`. */ adjust_offsets?: boolean + /** If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`. */ ignore_keywords?: boolean } @@ -5434,18 +7859,32 @@ export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterT } export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase { + /** If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`. */ catenate_all?: boolean + /** If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`.
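A sketch of an updateable `synonym` filter applied only at search time, per the `updateable` note above. The synonym set name is hypothetical and would be created via the Synonyms Management APIs; `client` is assumed as before:

```ts
await client.indices.create({
  index: 'my-synonyms',
  settings: {
    analysis: {
      filter: {
        my_synonyms: {
          type: 'synonym',
          synonyms_set: 'my-synonym-set',  // hypothetical managed synonym set
          updateable: true                 // only valid on search-time analyzers
        }
      },
      analyzer: {
        synonym_search: {
          type: 'custom',
          tokenizer: 'standard',
          filter: ['lowercase', 'my_synonyms']
        }
      }
    }
  },
  mappings: {
    properties: {
      description: {
        type: 'text',
        analyzer: 'standard',
        search_analyzer: 'synonym_search'
      }
    }
  }
})
```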
*/ catenate_numbers?: boolean + /** If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`. */ catenate_words?: boolean + /** If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ generate_number_parts?: boolean + /** If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ generate_word_parts?: boolean + /** If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`. */ preserve_original?: SpecUtilsStringified + /** Array of tokens the filter won’t split. */ protected_words?: string[] + /** Path to a file that contains a list of tokens the filter won’t split. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. */ protected_words_path?: string + /** If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`. */ split_on_case_change?: boolean + /** If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`. */ split_on_numerics?: boolean + /** If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. */ stem_english_possessive?: boolean + /** Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ type_table?: string[] + /** Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ type_table_path?: string } @@ -5482,6 +7921,8 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean script?: Script | string on_script_error?: MappingOnScriptError + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_dimension?: boolean type: 'boolean' } @@ -5492,9 +7933,18 @@ export interface MappingByteNumberProperty extends MappingNumberPropertyBase { } export interface MappingChunkingSettings { + /** The chunking strategy: `sentence` or `word`. */ strategy: string + /** The maximum size of a chunk in words. + * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ max_chunk_size: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`. 
*/ sentence_overlap?: integer } @@ -5565,9 +8015,27 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float' export interface MappingDenseVectorIndexOptions { + /** The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and + * `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for + * optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating + * the quantization thresholds. + * + * For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization + * thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). + * + * Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. + * + * Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */ confidence_interval?: float + /** The number of candidates to track while assembling the list of nearest neighbors for each new node. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ ef_construction?: integer + /** The number of neighbors each node will be connected to in the HNSW graph. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ m?: integer + /** The type of kNN algorithm to use. */ type: MappingDenseVectorIndexOptionsType } @@ -5575,10 +8043,30 @@ export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' + /** Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of + * the first vector added to the field. */ dims?: integer + /** The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. */ element_type?: MappingDenseVectorElementType + /** If `true`, you can search this field using the kNN search API. */ index?: boolean + /** An optional section that configures the kNN indexing algorithm. The HNSW algorithm has two internal parameters + * that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the + * expense of slower indexing speed. + * + * This parameter can only be specified when `index` is `true`. */ index_options?: MappingDenseVectorIndexOptions + /** The vector similarity metric to use in kNN search. + * + * Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will + * be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds + * to a higher ranking. + * + * Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. + * + * `bit` vectors only support `l2_norm` as their similarity metric. + * + * This parameter can only be specified when `index` is `true`. */ similarity?: MappingDenseVectorSimilarity } @@ -5717,7 +8205,9 @@ export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBas type: 'icu_collation_keyword' norms?: boolean index_options?: MappingIndexOptions + /** Should the field be searchable? */ index?: boolean + /** Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. 
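 *
 * To tie the `dense_vector` options above together, a minimal mapping sketch; the index
 * name, field name, and parameter values are assumptions, not from the spec:
 *
 *   await client.indices.create({
 *     index: 'my-vectors',
 *     mappings: {
 *       properties: {
 *         embedding: {
 *           type: 'dense_vector',
 *           dims: 384,
 *           index: true,
 *           similarity: 'cosine',
 *           index_options: { type: 'int8_hnsw', m: 16, ef_construction: 100 }
 *         }
 *       }
 *     }
 *   })
 *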
*/ null_value?: string rules?: string language?: string @@ -5755,6 +8245,8 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { null_value?: string on_script_error?: MappingOnScriptError script?: Script | string + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_dimension?: boolean type: 'ip' } @@ -5781,6 +8273,8 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { null_value?: string similarity?: string | null split_queries_on_whitespace?: boolean + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_dimension?: boolean type: 'keyword' } @@ -5796,8 +8290,13 @@ export interface MappingLongRangeProperty extends MappingRangePropertyBase { export interface MappingMatchOnlyTextProperty { type: 'match_only_text' + /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one + * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ fields?: Record + /** Metadata about the field. */ meta?: Record + /** Allows you to copy the values of multiple fields into a group + * field, which can then be queried as a single field. */ copy_to?: Fields } @@ -5821,7 +8320,11 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase index?: boolean on_script_error?: MappingOnScriptError script?: Script | string + /** For internal use by Elastic only. Marks the field as a time series metric. + * @experimental */ time_series_metric?: MappingTimeSeriesMetricType + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+ * @experimental */ time_series_dimension?: boolean } @@ -5854,6 +8357,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { + /** Metadata about the field. */ meta?: Record properties?: Record ignore_above?: integer @@ -5891,13 +8395,21 @@ export interface MappingRoutingField { } export interface MappingRuntimeField { + /** For type `composite`. */ fields?: Record + /** For type `lookup`. */ fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] + /** A custom format for `date` type runtime fields. */ format?: string + /** For type `lookup`. */ input_field?: Field + /** For type `lookup`. */ target_field?: Field + /** For type `lookup`. */ target_index?: IndexName + /** Painless script executed at query time. */ script?: Script | string + /** Field type, which can be: `boolean`, `composite`, `date`, `double`, `geo_point`, `ip`, `keyword`, `long`, or `lookup`. */ type: MappingRuntimeFieldType } @@ -5936,9 +8448,20 @@ export interface MappingSemanticTextIndexOptions { export interface MappingSemanticTextProperty { type: 'semantic_text' meta?: Record + /** Inference endpoint that will be used to generate embeddings for the field. + * This parameter cannot be updated. Use the Create inference API to create the endpoint. + * If `search_inference_id` is specified, the inference endpoint will only be used at index time. */ inference_id?: Id + /** Inference endpoint that will be used to generate embeddings at query time. + * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint. + * If not specified, the inference endpoint defined by `inference_id` will be used at both index and query time. */ search_inference_id?: Id + /** Settings for `index_options` that override any defaults used by `semantic_text`, for example + * specific quantization settings. */ index_options?: MappingSemanticTextIndexOptions + /** Settings for chunking text into smaller passages.
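 *
 * A hedged sketch of how these settings fit together; the index name and the
 * inference endpoint `my-elser-endpoint` are assumptions:
 *
 *   await client.indices.create({
 *     index: 'docs',
 *     mappings: {
 *       properties: {
 *         content: {
 *           type: 'semantic_text',
 *           inference_id: 'my-elser-endpoint',
 *           chunking_settings: { strategy: 'sentence', max_chunk_size: 250, sentence_overlap: 1 }
 *         }
 *       }
 *     }
 *   })
 *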
If specified, these will override the + * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, + * they will not be applied to existing documents until they are reindexed. */ chunking_settings?: MappingChunkingSettings } @@ -5971,13 +8494,22 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' export interface MappingSparseVectorIndexOptions { + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ pruning_config?: TokenPruningConfig } export interface MappingSparseVectorProperty extends MappingPropertyBase { store?: boolean type: 'sparse_vector' + /** Additional index options for the sparse vector field that controls the + * token pruning behavior of the sparse vector field. */ index_options?: MappingSparseVectorIndexOptions } @@ -6064,16 +8596,26 @@ export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { } export interface QueryDslBoolQuery extends QueryDslQueryBase { + /** The clause (query) must appear in matching documents. + * However, unlike `must`, the score of the query will be ignored. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Specifies the number or percentage of `should` clauses returned documents must match. */ minimum_should_match?: MinimumShouldMatch + /** The clause (query) must appear in matching documents and will contribute to the score. */ must?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) must not appear in the matching documents. + * Because scoring is ignored, a score of `0` is returned for all documents. */ must_not?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) should appear in the matching document. */ should?: QueryDslQueryContainer | QueryDslQueryContainer[] } export interface QueryDslBoostingQuery extends QueryDslQueryBase { + /** Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */ negative_boost: double + /** Query used to decrease the relevance score of matching documents. */ negative: QueryDslQueryContainer + /** Any returned documents must match this query. */ positive: QueryDslQueryContainer } @@ -6082,11 +8624,18 @@ export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' export type QueryDslCombinedFieldsOperator = 'or' | 'and' export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { + /** List of fields to search. Field wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */ fields: Field[] + /** Text to search for in the provided `fields`. + * The `combined_fields` query analyzes the provided text before performing a search. */ query: string + /** If true, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean + /** Boolean logic used to interpret text in the query value. 
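 *
 * For example, a `combined_fields` search might look like the following sketch
 * (index and field names are illustrative assumptions):
 *
 *   await client.search({
 *     index: 'articles',
 *     query: {
 *       combined_fields: {
 *         query: 'database index',
 *         fields: ['title', 'abstract', 'body'],
 *         operator: 'or'
 *       }
 *     }
 *   })
 *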
*/ operator?: QueryDslCombinedFieldsOperator + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslCombinedFieldsZeroTerms } @@ -6102,6 +8651,9 @@ export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { } export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { + /** Filter query you wish to run. Any returned documents must match this query. + * Filter queries do not calculate relevance scores. + * To speed up performance, Elasticsearch automatically caches frequently used filter queries. */ filter: QueryDslQueryContainer } @@ -6114,70 +8666,111 @@ export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatur } export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone } export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction export interface QueryDslDecayFunctionBase { + /** Determines how the distance is calculated when a field used for computing the decay contains multiple values. */ multi_value_mode?: QueryDslMultiValueMode } export interface QueryDslDecayPlacement { + /** Defines how documents are scored at the distance given at scale. */ decay?: double + /** If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */ offset?: TScale + /** Defines the distance from origin + offset at which the computed score will equal `decay` parameter. */ scale?: TScale + /** The point of origin used for calculating distance. Must be given as a number for numeric field, date for date fields and geo point for geo fields. */ origin?: TOrigin } export interface QueryDslDisMaxQuery extends QueryDslQueryBase { + /** One or more query clauses. + * Returned documents must match one or more of these queries. + * If a document matches multiple queries, Elasticsearch uses the highest relevance score. */ queries: QueryDslQueryContainer[] + /** Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */ tie_breaker?: double } export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { + /** Date or point of origin used to calculate distances. + * If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. + * Date Math, such as `now-1h`, is supported. + * If the field value is a `geo_point` field, the `origin` value must be a geopoint. */ origin: TOrigin + /** Distance from the `origin` at which relevance scores receive half of the `boost` value. + * If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */ pivot: TDistance + /** Name of the field used to calculate distances. 
This field must meet the following criteria: + * be a `date`, `date_nanos` or `geo_point` field; + * have an `index` mapping parameter value of `true`, which is the default; + * have a `doc_values` mapping parameter value of `true`, which is the default. */ field: Field } export interface QueryDslExistsQuery extends QueryDslQueryBase { + /** Name of the field you wish to search. */ field: Field } export interface QueryDslFieldAndFormat { + /** A wildcard pattern. The request returns values for field names matching this pattern. */ field: Field + /** The format in which the values are returned. */ format?: string include_unmapped?: boolean } export interface QueryDslFieldLookup { + /** `id` of the document. */ id: Id + /** Index from which to retrieve the document. */ index?: IndexName + /** Name of the field. */ path?: Field + /** Custom routing value. */ routing?: Routing } export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' export interface QueryDslFieldValueFactorScoreFunction { + /** Field to be extracted from the document. */ field: Field + /** Optional factor to multiply the field value with. */ factor?: double + /** Value used if the document doesn’t have that field. + * The modifier and factor are still applied to it as though it were read from the document. */ missing?: double + /** Modifier to apply to the field value. */ modifier?: QueryDslFieldValueFactorModifier } export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' export interface QueryDslFunctionScoreContainer { + /** Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */ exp?: QueryDslDecayFunction + /** Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */ gauss?: QueryDslDecayFunction + /** Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ linear?: QueryDslDecayFunction + /** Function that allows you to use a field from a document to influence the score. + * It’s similar to using the `script_score` function; however, it avoids the overhead of scripting. */ field_value_factor?: QueryDslFieldValueFactorScoreFunction + /** Generates scores that are uniformly distributed from 0 up to but not including 1. + * In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ random_score?: QueryDslRandomScoreFunction + /** Enables you to wrap another query and customize the scoring of it, optionally with a computation derived from other numeric field values in the doc using a script expression. */ script_score?: QueryDslScriptScoreFunction filter?: QueryDslQueryContainer weight?: double @@ -6186,26 +8779,42 @@ export interface QueryDslFunctionScoreContainer { export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + /** Defines how the newly computed score is combined with the score of the query. */ boost_mode?: QueryDslFunctionBoostMode + /** One or more functions that compute a new score for each document returned by the query. */ functions?: QueryDslFunctionScoreContainer[] + /** Restricts the new score to not exceed the provided limit.
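 *
 * A sketch of a `function_score` query combining the functions above; the index,
 * field names, and values are assumptions:
 *
 *   await client.search({
 *     index: 'products',
 *     query: {
 *       function_score: {
 *         query: { match: { name: 'phone' } },
 *         functions: [
 *           { field_value_factor: { field: 'popularity', factor: 1.2, modifier: 'sqrt', missing: 1 } },
 *           { random_score: { seed: 42, field: '_seq_no' } }
 *         ],
 *         score_mode: 'sum',
 *         boost_mode: 'multiply'
 *       }
 *     }
 *   })
 *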
*/ max_boost?: double + /** Excludes documents that do not meet the provided score threshold. */ min_score?: double + /** A query that determines the documents for which a new score is computed. */ query?: QueryDslQueryContainer + /** Specifies how the computed scores are combined. */ score_mode?: QueryDslFunctionScoreMode } export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + /** Maximum number of variations created. */ max_expansions?: integer + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ transpositions?: boolean + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Term you wish to find in the provided field. */ value: string | double | boolean } export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys @@ -6220,9 +8829,17 @@ export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeature } export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + /** The radius of the circle centred on the specified location. + * Points which fall into this circle are considered to be matches. */ distance: Distance + /** How to compute the distance. + * Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. */ distance_type?: GeoDistanceType + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys @@ -6249,11 +8866,15 @@ export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape + /** Query using an indexed shape retrieved from the specified document and path. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation operator used to search a geo field. */ relation?: GeoShapeRelation } export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped.
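 *
 * As an illustrative sketch, a `geo_distance` filter using the options above;
 * the index, the `pin.location` field, and the coordinates are assumptions:
 *
 *   await client.search({
 *     index: 'places',
 *     query: {
 *       bool: {
 *         filter: {
 *           geo_distance: {
 *             distance: '10km',
 *             distance_type: 'arc',
 *             'pin.location': { lat: 40.7128, lon: -74.0060 }
 *           }
 *         }
 *       }
 *     }
 *   })
 *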
*/ ignore_unmapped?: boolean } export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys @@ -6262,125 +8883,211 @@ export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' export interface QueryDslHasChildQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Maximum number of child documents that match the query allowed for a returned parent document. + * If the parent document exceeds this limit, it is excluded from the search results. */ max_children?: integer + /** Minimum number of matching child documents required for a returned parent document. + * If the parent document does not meet this limit, it is excluded from the search results. */ min_children?: integer + /** Query you wish to run on child documents of the `type` field. + * If a child document matches the search, the query returns the parent document. */ query: QueryDslQueryContainer + /** Indicates how scores for matching child documents affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode + /** Name of the child relationship mapped for the `join` field. */ type: RelationName } export interface QueryDslHasParentQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. + * You can use this parameter to query multiple indices that may not contain the `parent_type`. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Name of the parent relationship mapped for the `join` field. */ parent_type: RelationName + /** Query you wish to run on parent documents of the `parent_type` field. + * If a parent document matches the search, the query returns its child documents. */ query: QueryDslQueryContainer + /** Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */ score?: boolean } export interface QueryDslIdsQuery extends QueryDslQueryBase { + /** An array of document IDs. */ values?: Ids } export interface QueryDslIntervalsAllOf { + /** An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ intervals: QueryDslIntervalsContainer[] + /** Maximum number of positions between the matching terms. + * Intervals produced by the rules further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, intervals produced by the rules should appear in the order in which they are specified. */ ordered?: boolean + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsAnyOf { + /** An array of rules to match. */ intervals: QueryDslIntervalsContainer[] + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsContainer { + /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text.
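 *
 * Stepping back to the join-field queries defined above, a `has_child` sketch;
 * the relation name `answer` and the field names are assumptions:
 *
 *   await client.search({
 *     index: 'my-index',
 *     query: {
 *       has_child: {
 *         type: 'answer',
 *         query: { match: { body: 'elasticsearch' } },
 *         score_mode: 'max',
 *         min_children: 1
 *       }
 *     }
 *   })
 *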
*/ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix range?: QueryDslIntervalsRange regexp?: QueryDslIntervalsRegexp + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsFilter { + /** Query used to return intervals that follow an interval from the `filter` rule. */ after?: QueryDslIntervalsContainer + /** Query used to return intervals that occur before an interval from the `filter` rule. */ before?: QueryDslIntervalsContainer + /** Query used to return intervals contained by an interval from the `filter` rule. */ contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that contain an interval from the `filter` rule. */ containing?: QueryDslIntervalsContainer + /** Query used to return intervals that are **not** contained by an interval from the `filter` rule. */ not_contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** contain an interval from the `filter` rule. */ not_containing?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ not_overlapping?: QueryDslIntervalsContainer + /** Query used to return intervals that overlap with an interval from the `filter` rule. */ overlapping?: QueryDslIntervalsContainer + /** Script used to return matching documents. + * This script must return a boolean value: `true` or `false`. */ script?: Script | string } export interface QueryDslIntervalsFuzzy { + /** Analyzer used to normalize the term. */ analyzer?: string + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** The term to match. */ term: string + /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ transpositions?: boolean + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsMatch { + /** Analyzer used to analyze terms in the query. */ analyzer?: string + /** Maximum number of positions between the matching terms. + * Terms further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, matching terms must appear in their specified order. */ ordered?: boolean + /** Text you wish to find in the provided field. */ query: string + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field + /** An optional interval filter. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsPrefix { + /** Analyzer used to analyze the `prefix`. */ analyzer?: string + /** Beginning characters of terms you wish to find in the top-level field. */ prefix: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsQuery extends QueryDslQueryBase { + /** Returns matches that span a combination of other rules. 
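 *
 * A sketch of an `intervals` query built from these rules; the field name
 * `my_text` and the phrases are assumptions:
 *
 *   await client.search({
 *     index: 'my-index',
 *     query: {
 *       intervals: {
 *         my_text: {
 *           all_of: {
 *             ordered: true,
 *             max_gaps: 2,
 *             intervals: [
 *               { match: { query: 'my favorite food', max_gaps: 0, ordered: true } },
 *               { any_of: { intervals: [{ match: { query: 'hot water' } }, { match: { query: 'cold porridge' } }] } }
 *             ]
 *           }
 *         }
 *       }
 *     }
 *   })
 *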
*/ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix range?: QueryDslIntervalsRange regexp?: QueryDslIntervalsRegexp + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsRange { + /** Analyzer used to analyze the `prefix`. */ analyzer?: string + /** Lower term, either gte or gt must be provided. */ gte?: string + /** Lower term, either gte or gt must be provided. */ gt?: string + /** Upper term, either lte or lt must be provided. */ lte?: string + /** Upper term, either lte or lt must be provided. */ lt?: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsRegexp { + /** Analyzer used to analyze the `prefix`. */ analyzer?: string + /** Regex pattern. */ pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsWildcard { + /** Analyzer used to analyze the `pattern`. + * Defaults to the top-level field's analyzer. */ analyzer?: string + /** Wildcard pattern used to find matching terms. */ pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { + /** A document not present in the index. */ doc?: any fields?: Field[] + /** ID of a document. */ _id?: Id + /** Index of a document. */ _index?: IndexName + /** Overrides the default analyzer. */ per_field_analyzer?: Record routing?: Routing version?: VersionNumber @@ -6391,14 +9098,31 @@ export interface QueryDslMatchAllQuery extends QueryDslQueryBase { } export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Maximum edit distance allowed for matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** Maximum number of terms to which the query will expand. + * Can be applied to the term subqueries constructed for all terms but the final term. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. + * Applied to the constructed bool query. 
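 *
 * For instance, a `match_bool_prefix` search sketch; the index and the
 * `message` field are assumptions:
 *
 *   await client.search({
 *     index: 'messages',
 *     query: {
 *       match_bool_prefix: {
 *         message: { query: 'quick brown f', operator: 'and' }
 *       }
 *     }
 *   })
 *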
*/ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. + * Applied to the constructed bool query. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ prefix_length?: integer + /** Terms you wish to find in the provided field. + * The last term is used in a prefix query. */ query: string } @@ -6406,84 +9130,149 @@ export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { } export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query value into tokens. */ analyzer?: string + /** Maximum number of terms to which the last provided term of the query value will expand. */ max_expansions?: integer + /** Text you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Query terms that are analyzed and turned into a phrase query. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ query: string | float | boolean + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { + /** The analyzer that is used to analyze the free form text. + * Defaults to the analyzer associated with the first field in fields. */ analyzer?: string + /** Each term in the formed query could be further boosted by their tf-idf score. + * This sets the boost factor to use when using this feature. 
+ * Defaults to deactivated (0). */ boost_terms?: double + /** Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ fail_on_unsupported_field?: boolean + /** A list of fields to fetch and analyze the text from. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[] + /** Specifies whether the input documents should also be included in the search results returned. */ include?: boolean + /** Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ like: QueryDslLike | QueryDslLike[] + /** The maximum document frequency above which the terms are ignored from the input document. */ max_doc_freq?: integer + /** The maximum number of query terms that can be selected. */ max_query_terms?: integer + /** The maximum word length above which the terms are ignored. + * Defaults to unbounded (`0`). */ max_word_length?: integer + /** The minimum document frequency below which the terms are ignored from the input document. */ min_doc_freq?: integer + /** After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ minimum_should_match?: MinimumShouldMatch + /** The minimum term frequency below which the terms are ignored from the input document. */ min_term_freq?: integer + /** The minimum word length below which the terms are ignored. */ min_word_length?: integer routing?: Routing + /** An array of stop words. + * Any word in this set is ignored. */ stop_words?: AnalysisStopWords + /** Used in combination with `like` to exclude documents that match a set of terms. */ unlike?: QueryDslLike | QueryDslLike[] version?: VersionNumber version_type?: VersionType } export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** The fields to be queried. + * Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. */ fields?: Fields + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Determines how scores for each per-term blended query and scores across groups are combined. 
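 *
 * A `multi_match` sketch using the options above; the index, fields, and boost
 * values are assumptions:
 *
 *   await client.search({
 *     index: 'articles',
 *     query: {
 *       multi_match: {
 *         query: 'quick brown fox',
 *         type: 'best_fields',
 *         fields: ['subject^3', 'message'],
 *         tie_breaker: 0.3
 *       }
 *     }
 *   })
 *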
*/ tie_breaker?: double + /** How the `multi_match` query is executed internally. */ type?: QueryDslTextQueryType + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' export interface QueryDslNestedQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Path to the nested object you wish to search. */ path: Field + /** Query you wish to run on nested objects in the path. */ query: QueryDslQueryContainer + /** How scores for matching child objects affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode } @@ -6498,134 +9287,262 @@ export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' export interface QueryDslParentIdQuery extends QueryDslQueryBase { + /** ID of the parent document. */ id?: Id + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** Name of the child relationship mapped for the `join` field. */ type?: RelationName } export interface QueryDslPercolateQuery extends QueryDslQueryBase { + /** The source of the document being percolated. */ document?: any + /** An array of sources of the documents being percolated. */ documents?: any[] + /** Field that holds the indexed queries. The field must use the `percolator` mapping type. */ field: Field + /** The ID of a stored document to percolate. */ id?: Id + /** The index of a stored document to percolate. */ index?: IndexName + /** The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ name?: string + /** Preference used to fetch document to percolate. */ preference?: string + /** Routing used to fetch document to percolate. */ routing?: Routing + /** The expected version of a stored document to percolate. */ version?: VersionNumber } export interface QueryDslPinnedDoc { + /** The unique document ID. */ _id: Id + /** The index that contains the document. */ _index: IndexName } export interface QueryDslPinnedQuery extends QueryDslQueryBase { + /** Any choice of query used to rank documents which will be ranked below the "pinned" documents. */ organic: QueryDslQueryContainer + /** Document IDs listed in the order they are to appear in results. + * Required if `docs` is not specified. */ ids?: Id[] + /** Documents listed in the order they are to appear in results. + * Required if `ids` is not specified. */ docs?: QueryDslPinnedDoc[] } export interface QueryDslPrefixQuery extends QueryDslQueryBase { + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Beginning characters of terms you wish to find in the provided field. */ value: string + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * Default is `false`, which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean } export interface QueryDslQueryBase { + /** Floating point number used to decrease or increase the relevance scores of the query. + * Boost values are relative to the default value of 1.0.
+ * A boost value between 0 and 1.0 decreases the relevance score. + * A value greater than 1.0 increases the relevance score. */ boost?: float _name?: string } export interface QueryDslQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ boosting?: QueryDslBoostingQuery common?: Partial> + /** The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */ combined_fields?: QueryDslCombinedFieldsQuery + /** Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ constant_score?: QueryDslConstantScoreQuery + /** Returns documents matching one or more wrapped queries, called query clauses or clauses. + * If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ dis_max?: QueryDslDisMaxQuery + /** Boosts the relevance score of documents closer to a provided origin date or point. + * For example, you can use this query to give more weight to documents closer to a certain date or location. */ distance_feature?: QueryDslDistanceFeatureQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** The `function_score` query enables you to modify the score of documents that are retrieved by a query. */ function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] + /** Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */ fuzzy?: Partial> + /** Matches geo_point and geo_shape values that intersect a bounding box. */ geo_bounding_box?: QueryDslGeoBoundingBoxQuery + /** Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ geo_distance?: QueryDslGeoDistanceQuery + /** Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. */ geo_grid?: Partial> geo_polygon?: QueryDslGeoPolygonQuery + /** Filter documents indexed using either the `geo_shape` or the `geo_point` type. */ geo_shape?: QueryDslGeoShapeQuery + /** Returns parent documents whose joined child documents match a provided query. */ has_child?: QueryDslHasChildQuery + /** Returns child documents whose joined parent document matches a provided query. */ has_parent?: QueryDslHasParentQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns documents based on the order and proximity of matching terms. */ intervals?: Partial> + /** Finds the k nearest vectors to a query vector, as measured by a similarity + * metric. The `knn` query finds nearest vectors through approximate search on indexed + * dense_vectors. */ knn?: KnnQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Analyzes its input and constructs a `bool` query from the terms. + * Each term except the last is used in a `term` query. + * The last term is used in a prefix query.
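 *
 * As a sketch of composing queries from this container, a `pinned` query with an
 * `organic` match; the index, IDs, and field name are assumptions:
 *
 *   await client.search({
 *     index: 'products',
 *     query: {
 *       pinned: {
 *         ids: ['1', '4', '100'],
 *         organic: { match: { description: 'iphone' } }
 *       }
 *     }
 *   })
 *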
*/ match_bool_prefix?: Partial> + /** Matches no documents. */ match_none?: QueryDslMatchNoneQuery + /** Analyzes the text and creates a phrase query out of the analyzed text. */ match_phrase?: Partial> + /** Returns documents that contain the words of a provided text, in the same order as provided. + * The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ match_phrase_prefix?: Partial> + /** Returns documents that are "like" a given set of documents. */ more_like_this?: QueryDslMoreLikeThisQuery + /** Enables you to search for a provided text, number, date or boolean value across multiple fields. + * The provided text is analyzed before matching. */ multi_match?: QueryDslMultiMatchQuery + /** Wraps another query to search nested fields. + * If an object matches the search, the nested query returns the root parent document. */ nested?: QueryDslNestedQuery + /** Returns child documents joined to a specific parent document. */ parent_id?: QueryDslParentIdQuery + /** Matches queries stored in an index. */ percolate?: QueryDslPercolateQuery + /** Promotes selected documents to rank higher than those matching a given query. */ pinned?: QueryDslPinnedQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents based on a provided query string, using a parser with a strict syntax. */ query_string?: QueryDslQueryStringQuery + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ rank_feature?: QueryDslRankFeatureQuery + /** Returns documents that contain terms matching a regular expression. */ regexp?: Partial> rule?: QueryDslRuleQuery + /** Filters documents based on a provided script. + * The script query is typically used in a filter context. */ script?: QueryDslScriptQuery + /** Uses a script to provide a custom score for returned documents. */ script_score?: QueryDslScriptScoreQuery + /** A semantic query to semantic_text field types */ semantic?: QueryDslSemanticQuery + /** Queries documents that contain fields indexed using the `shape` type. */ shape?: QueryDslShapeQuery + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns matches which enclose another span query. */ span_containing?: QueryDslSpanContainingQuery + /** Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Matches spans near the beginning of a field. */ span_first?: QueryDslSpanFirstQuery + /** Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ span_multi?: QueryDslSpanMultiTermQuery + /** Matches spans which are near one another. + * You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. */ span_near?: QueryDslSpanNearQuery + /** Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ span_not?: QueryDslSpanNotQuery + /** Matches the union of its span clauses. 
*/ span_or?: QueryDslSpanOrQuery + /** Matches spans containing a term. */ span_term?: Partial> + /** Returns matches which are enclosed inside another span query. */ span_within?: QueryDslSpanWithinQuery + /** Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. */ sparse_vector?: QueryDslSparseVectorQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain a minimum number of exact terms in a provided field. + * To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ terms_set?: Partial> + /** Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ text_expansion?: Partial> + /** Supports returning text_expansion query results by sending in precomputed tokens with the query. */ weighted_tokens?: Partial> + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> + /** A query that accepts any other query as base64 encoded string. */ wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery } export interface QueryDslQueryStringQuery extends QueryDslQueryBase { + /** If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ allow_leading_wildcard?: boolean + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean + /** Default field to search if no field is provided in the query string. + * Supports wildcards (`*`). + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ default_field?: Field + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** If `true`, enable position increments in queries constructed from a `query_string` search. */ enable_position_increments?: boolean escape?: boolean + /** Array of fields to search. Supports wildcards (`*`). */ fields?: Field[] + /** Maximum edit distance allowed for fuzzy matching. */ fuzziness?: Fuzziness + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. 
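 *
 * An illustrative `query_string` sketch; the index and the `content` field are
 * assumptions:
 *
 *   await client.search({
 *     index: 'articles',
 *     query: {
 *       query_string: {
 *         query: '(new york city) OR (big apple)',
 *         default_field: 'content',
 *         default_operator: 'or'
 *       }
 *     }
 *   })
 *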
*/ lenient?: boolean + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Maximum number of positions allowed between matching tokens for phrases. */ phrase_slop?: double + /** Query string you wish to parse and use for search. */ query: string + /** Analyzer used to convert quoted text in the query string into tokens. + * For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ quote_analyzer?: string + /** Suffix appended to quoted text in the query string. + * You can use this suffix to use a different analysis method for exact matches. */ quote_field_suffix?: string + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ tie_breaker?: double + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ time_zone?: TimeZone + /** Determines how the query matches and scores documents. */ type?: QueryDslTextQueryType } @@ -6637,10 +9554,15 @@ export interface QueryDslRandomScoreFunction { export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery export interface QueryDslRangeQueryBase extends QueryDslQueryBase { + /** Indicates how the range query matches values for `range` fields. */ relation?: QueryDslRangeRelation + /** Greater than. */ gt?: T + /** Greater than or equal to. */ gte?: T + /** Less than. */ lt?: T + /** Less than or equal to. */ lte?: T from?: T | null to?: T | null @@ -6655,31 +9577,46 @@ export interface QueryDslRankFeatureFunctionLinear { } export interface QueryDslRankFeatureFunctionLogarithm { + /** Configurable scaling factor. */ scaling_factor: float } export interface QueryDslRankFeatureFunctionSaturation { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot?: float } export interface QueryDslRankFeatureFunctionSigmoid { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot: float + /** Configurable Exponent. */ exponent: float } export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + /** `rank_feature` or `rank_features` field used to boost relevance scores. */ field: Field + /** Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ saturation?: QueryDslRankFeatureFunctionSaturation + /** Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */ log?: QueryDslRankFeatureFunctionLogarithm + /** Linear function used to boost relevance scores based on the value of the rank feature `field`. */ linear?: QueryDslRankFeatureFunctionLinear + /** Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */ sigmoid?: QueryDslRankFeatureFunctionSigmoid } export interface QueryDslRegexpQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. + * When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Enables optional operators for the regular expression. 
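 *
 * For example, a `regexp` sketch using these parameters; the index and the
 * `user.id` field are assumptions:
 *
 *   await client.search({
 *     index: 'users',
 *     query: {
 *       regexp: {
 *         'user.id': { value: 'k.*y', flags: 'ALL', case_insensitive: true, max_determinized_states: 10000 }
 *       }
 *     }
 *   })
 *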
*/ flags?: string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Regular expression for terms you wish to find in the provided field. */ value: string } @@ -6691,31 +9628,44 @@ export interface QueryDslRuleQuery extends QueryDslQueryBase { } export interface QueryDslScriptQuery extends QueryDslQueryBase { + /** Contains a script to run as a query. + * This script must return a boolean value, `true` or `false`. */ script: Script | string } export interface QueryDslScriptScoreFunction { + /** A script that computes a score. */ script: Script | string } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + /** Documents with a score lower than this floating point number are excluded from the search results. */ min_score?: float + /** Query used to return documents. */ query: QueryDslQueryContainer + /** Script used to compute the score of documents returned by the query. + * Important: final relevance scores from the `script_score` query cannot be negative. */ script: Script | string } export interface QueryDslSemanticQuery extends QueryDslQueryBase { + /** The field to query, which must be a `semantic_text` field type. */ field: string + /** The query text. */ query: string } export interface QueryDslShapeFieldQuery { + /** Queries using a pre-indexed shape. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation between the query shape and the document shape. */ relation?: GeoShapeRelation + /** Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */ shape?: GeoShape } export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { + /** When set to `true`, the query ignores an unmapped field and will not match any documents. */ ignore_unmapped?: boolean } export type QueryDslShapeQuery = QueryDslShapeQueryKeys @@ -6726,23 +9676,43 @@ export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PRE export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, the parser creates a match_phrase query for each multi-position token. */ auto_generate_synonyms_phrase_query?: boolean + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** Array of fields you wish to search. + * Accepts wildcard expressions. + * You can also boost relevance scores for matches to particular fields using a caret (`^`) notation. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[] + /** List of enabled operators for the simple query string syntax. */ flags?: QueryDslSimpleQueryStringFlags + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored.
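// Example (illustrative, not part of the diff): a script_score query. Note the
// constraint documented above: final script_score relevance scores cannot be
// negative. Index and field names are invented.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'products',
  query: {
    script_score: {
      query: { match: { title: 'sneaker' } },
      // Boost by a stored popularity signal; log keeps the score positive.
      script: { source: "Math.log(2 + doc['rating'].value)" },
      min_score: 0.5 // drop weakly scored hits entirely
    }
  }
})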
*/ lenient?: boolean + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Query string in the simple query string syntax you wish to parse and use for search. */ query: string + /** Suffix appended to quoted text in the query string. */ quote_field_suffix?: string } export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ little: QueryDslSpanQuery } @@ -6752,44 +9722,66 @@ export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { } export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + /** Controls the maximum end position permitted in a match. */ end: integer + /** Can be any other span type query. */ match: QueryDslSpanQuery } export type QueryDslSpanGapQuery = Partial<Record<Field, integer>> export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + /** Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ match: QueryDslQueryContainer } export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] + /** Controls whether matches are required to be in-order. */ in_order?: boolean + /** Controls the maximum number of intervening unmatched positions permitted. */ slop?: integer } export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + /** The number of tokens from within the include span that can’t have overlap with the exclude span. + * Equivalent to setting both `pre` and `post`. */ dist?: integer + /** Span query whose matches must not overlap those returned. */ exclude: QueryDslSpanQuery + /** Span query whose matches are filtered. */ include: QueryDslSpanQuery + /** The number of tokens after the include span that can’t have overlap with the exclude span. */ post?: integer + /** The number of tokens before the include span that can’t have overlap with the exclude span. */ pre?: integer } export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] } export interface QueryDslSpanQuery { + /** Accepts a list of span queries, but only returns those spans which also match a second span query. */ span_containing?: QueryDslSpanContainingQuery + /** Allows queries like `span_near` or `span_or` across different fields. */ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Accepts another span query whose matches must appear within the first N positions of the field. */ span_first?: QueryDslSpanFirstQuery span_gap?: QueryDslSpanGapQuery + /** Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */ span_multi?: QueryDslSpanMultiTermQuery + /** Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ span_near?: QueryDslSpanNearQuery + /** Wraps another span query, and excludes any documents which match that query. */ span_not?: QueryDslSpanNotQuery + /** Combines multiple span queries and returns documents which match any of the specified queries. */ span_or?: QueryDslSpanOrQuery + /** The equivalent of the `term` query but for use with other span queries.
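// Example (illustrative, not part of the diff): composing the span queries
// typed above. A span_near enforces proximity and order between a span_term
// and a span_multi-wrapped prefix query. Field names are invented.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.search({
  index: 'articles',
  query: {
    span_near: {
      clauses: [
        { span_term: { body: 'quick' } },
        { span_multi: { match: { prefix: { body: { value: 'bro' } } } } }
      ],
      slop: 2,        // at most two intervening unmatched positions
      in_order: true  // clauses must match in the listed order
    }
  }
})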
*/ span_term?: Partial<Record<Field, QueryDslSpanTermQuery | string>> + /** The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ span_within?: QueryDslSpanWithinQuery } @@ -6798,21 +9790,46 @@ export interface QueryDslSpanTermQuery extends QueryDslQueryBase { } export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ little: QueryDslSpanQuery } export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + /** The name of the field that contains the token-weight pairs to be searched against. + * This field must be a mapped sparse_vector field. */ field: Field + /** Dictionary of precomputed sparse vectors and their associated weights. + * Only one of inference_id or query_vector may be supplied in a request. */ query_vector?: Record<string, float> + /** The inference ID to use to convert the query text into token-weight pairs. + * It must be the same inference ID that was used to create the tokens from the input text. + * Only one of inference_id and query_vector is allowed. + * If inference_id is specified, query must also be specified. + * Only one of inference_id or query_vector may be supplied in a request. */ inference_id?: Id + /** The query text you want to use for search. + * If inference_id is specified, query must also be specified. */ query?: string + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ pruning_config?: TokenPruningConfig } export interface QueryDslTermQuery extends QueryDslQueryBase { + /** Term you wish to find in the provided field. */ value: FieldValue + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean } @@ -6834,15 +9851,23 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + /** Specification describing number of matching terms required to return a document. */ minimum_should_match?: MinimumShouldMatch + /** Numeric field containing the number of matching terms required to return a document. */ minimum_should_match_field?: Field + /** Custom script containing the number of matching terms required to return a document. */ minimum_should_match_script?: Script | string + /** Array of terms you wish to find in the provided field.
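// Example (illustrative, not part of the diff): the two mutually exclusive
// ways to run the sparse_vector query typed above. The inference endpoint id,
// index, and field names are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// 1) Let an inference endpoint tokenize the query text.
const viaInference = await client.search({
  index: 'semantic-index',
  query: {
    sparse_vector: {
      field: 'content_embedding',
      inference_id: 'my-elser-endpoint', // placeholder endpoint id
      query: 'how do async searches expire?',
      prune: true
    }
  }
})

// 2) Or supply precomputed token weights; query_vector and inference_id
// may not be combined in one request.
const viaVector = await client.search({
  index: 'semantic-index',
  query: {
    sparse_vector: {
      field: 'content_embedding',
      query_vector: { elasticsearch: 1.2, search: 0.8 }
    }
  }
})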
*/ terms: string[] } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { + /** The text expansion NLP model to use. */ model_id: string + /** The query text. */ model_text: string + /** Token pruning configurations + * @experimental */ pruning_config?: TokenPruningConfig } @@ -6861,38 +9886,53 @@ export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFea } export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone } export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + /** The tokens representing this query. */ tokens: Record<string, float> + /** Token pruning configurations. */ pruning_config?: TokenPruningConfig } export interface QueryDslWildcardQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false, which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Wildcard pattern for terms you wish to find in the provided field. Required when wildcard is not set. */ value?: string + /** Wildcard pattern for terms you wish to find in the provided field. Required when value is not set. */ wildcard?: string } export interface QueryDslWrapperQuery extends QueryDslQueryBase { + /** A base64 encoded query. + * The binary data format can be any of JSON, YAML, CBOR or SMILE encodings. */ query: string } export type QueryDslZeroTermsQuery = 'all' | 'none' export interface AsyncSearchAsyncSearch<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> { + /** Partial aggregation results, coming from the shards that have already completed running the query. */ aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record<string, any> hits: SearchHitsMetadata<TDocument> max_score?: double + /** Indicates how many reductions of the results have been performed. + * If this number increases compared to the last retrieved results for a get async search request, you can expect additional results included in the search response. */ num_reduce_phases?: long profile?: SearchProfile pit_id?: Id _scroll_id?: ScrollId + /** Indicates how many shards have run the query. + * Note that in order for shard results to be included in the search response, they need to be reduced first. */ _shards: ShardStatistics suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]> terminated_early?: boolean @@ -6906,122 +9946,254 @@ export interface AsyncSearchAsyncSearchDocumentResponseBase extends AsyncSearchAsyncSearchResponseBase { response: AsyncSearchAsyncSearch } export interface AsyncSearchAsyncSearchResponseBase { id?: Id + /** Indicates whether the search is still running. + * + * > info + * > If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ is_running: boolean + /** Indicates when the async search will expire. */ expiration_time?: DateTime expiration_time_in_millis: EpochTime<UnitMillis> start_time?: DateTime start_time_in_millis: EpochTime<UnitMillis> + /** Indicates when the async search completed. + * It is present only when the search has completed. */ completion_time?: DateTime completion_time_in_millis?: EpochTime<UnitMillis> } export interface AsyncSearchDeleteRequest extends RequestBase { + /** A unique identifier for the async search. */ id: Id + /** All values in `body` will be added to the request body.
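// Example (illustrative, not part of the diff): the wrapper query takes
// another query as a base64-encoded string, here JSON-encoded with Node's
// Buffer. Index and field names are invented.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const encoded = Buffer.from(
  JSON.stringify({ term: { 'user.id': 'kimchy' } })
).toString('base64')

const res = await client.search({
  index: 'my-index',
  query: { wrapper: { query: encoded } }
})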
*/ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { + /** A unique identifier for the async search. */ id: Id + /** The length of time that the async search should be available in the cluster. + * When not specified, the `keep_alive` set with the corresponding submit async request will be used. + * Otherwise, it is possible to override the value and extend the validity of the request. + * When this period expires, the search, if still running, is cancelled. + * If the search is completed, its saved results are deleted. */ keep_alive?: Duration + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Specifies to wait for the search to be completed up until the provided timeout. + * Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. + * By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } } export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase export interface AsyncSearchStatusRequest extends RequestBase { + /** A unique identifier for the async search. */ id: Id + /** The length of time that the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never } } export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { + /** The number of shards that have run the query so far. */ _shards: ShardStatistics + /** Metadata about clusters involved in the cross-cluster search. + * It is not shown for local-only searches. */ _clusters?: ClusterStatistics + /** If the async search completed, this field shows the status code of the search. + * For example, `200` indicates that the async search was successfully completed. + * `503` indicates that the async search was completed with an error. */ completion_status?: integer } export interface AsyncSearchSubmitRequest extends RequestBase { + /** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Blocks and waits until the search is completed up to a certain timeout. 
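// Example (illustrative, not part of the diff): polling an async search with
// the status/get/delete requests typed above. The search id is a placeholder
// that would come from an earlier submit call (see the submit sketch below).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const id = 'FmRldE8zR...' // placeholder id returned by asyncSearch.submit

const status = await client.asyncSearch.status({ id })
if (!status.is_running) {
  // Extend how long the stored results stay available while we read them.
  const result = await client.asyncSearch.get({ id, keep_alive: '5m' })
  console.log(result.response.hits.hits)
  // Clean up the stored results once consumed.
  await client.asyncSearch.delete({ id })
}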
+ * When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ wait_for_completion_timeout?: Duration + /** Specifies how long the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration + /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ keep_on_completion?: boolean + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Indicate if an error should be returned if there is a partial search failure or timeout */ allow_partial_search_results?: boolean + /** The analyzer to use for the query string */ analyzer?: string + /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ analyze_wildcard?: boolean + /** Affects how often partial results become available, which happens whenever shard results are reduced. + * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ batched_reduce_size?: long + /** The default value is the only supported value. */ ccs_minimize_roundtrips?: boolean + /** The default operator for query string query (AND or OR) */ default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string */ df?: string + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether specified concrete, expanded or aliased indices should be ignored when throttled */ ignore_throttled?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean + /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ max_concurrent_shard_requests?: long + /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string + /** Specify if request cache should be used for this request or not, defaults to true */ request_cache?: boolean + /** A comma-separated list of specific routing values */ routing?: Routing + /** Search operation type */ search_type?: SearchType + /** Specifies which field to use for suggestions. */ suggest_field?: Field + /** Specify suggest mode */ suggest_mode?: SuggestMode + /** How many suggestions to return in response */ suggest_size?: long + /** The source text for which the suggestions should be returned. 
*/ suggest_text?: string + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean + /** A list of fields to exclude from the returned _source field */ _source_excludes?: Fields + /** A list of fields to extract and return from the _source field */ _source_includes?: Fields + /** Query in the Lucene query string syntax */ q?: string aggregations?: Record /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse + /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits + /** Boosts the _score of documents from specified indices. */ indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. 
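// Example (illustrative, not part of the diff): submitting an async search
// with the request options typed above. Index, field, and timing values are
// invented for this sketch.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const submitted = await client.asyncSearch.submit({
  index: 'logs-*',
  wait_for_completion_timeout: '2s', // return whatever is ready after 2s
  keep_on_completion: true,          // store results even if it finishes early
  keep_alive: '10m',
  size: 0,
  query: { range: { '@timestamp': { gte: 'now-1d/d' } } },
  aggs: { per_host: { terms: { field: 'host.name' } } }
})
console.log(submitted.id, submitted.is_running)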
*/ track_scores?: boolean + /** If true, returns document version as part of a hit. */ version?: boolean + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an in the request path. */ pit?: SearchPointInTimeReference + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export type AsyncSearchSubmitResponse> = AsyncSearchAsyncSearchDocumentResponseBase export interface AutoscalingAutoscalingPolicy { roles: string[] + /** Decider settings. */ deciders: Record } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase @@ -7054,7 +10226,13 @@ export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { } export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface AutoscalingGetAutoscalingCapacityResponse { @@ -7062,24 +10240,55 @@ export interface AutoscalingGetAutoscalingCapacityResponse { } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. 
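// Example (illustrative, not part of the diff): managing the autoscaling
// policy shape typed above, using the put/get/delete requests defined around
// this hunk (put appears just below). The policy name is invented, and the
// empty `ml` decider config is an assumption that relies on server defaults;
// available deciders depend on the cluster version.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.autoscaling.putAutoscalingPolicy({
  name: 'ml-autoscaling',
  policy: {
    roles: ['ml'],
    deciders: { ml: {} } // rely on the ml decider's default settings
  }
})

const policy = await client.autoscaling.getAutoscalingPolicy({ name: 'ml-autoscaling' })
await client.autoscaling.deleteAutoscalingPolicy({ name: 'ml-autoscaling' })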
+ * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration policy?: AutoscalingAutoscalingPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } } export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase +export type CatCatAliasesColumn = 'alias' | 'a' | 'index' | 'i' | 'idx' | 'filter' | 'f' | 'fi' | 'routing.index' | 'ri' | 'routingIndex' | 'routing.search' | 'rs' | 'routingSearch' | 'is_write_index' | 'w' | 'isWriteIndex' | string + +export type CatCatAliasesColumns = CatCatAliasesColumn | CatCatAliasesColumn[] + +export type CatCatAllocationColumn = 'shards' | 's' | 'shards.undesired' | 'write_load.forecast' | 'wlf' | 'writeLoadForecast' | 'disk.indices.forecast' | 'dif' | 'diskIndicesForecast' | 'disk.indices' | 'di' | 'diskIndices' | 'disk.used' | 'du' | 'diskUsed' | 'disk.avail' | 'da' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.percent' | 'dp' | 'diskPercent' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | string + +export type CatCatAllocationColumns = CatCatAllocationColumn | CatCatAllocationColumn[] + export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 
'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' -export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] +export type CatCatAnomalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] + +export type CatCatComponentColumn = 'name' | 'n' | 'version' | 'v' | 'alias_count' | 'a' | 'mapping_count' | 'm' | 'settings_count' | 's' | 'metadata_count' | 'me' | 'included_in' | 'i' | string + +export type CatCatComponentColumns = CatCatComponentColumn | CatCatComponentColumn[] + +export type CatCatCountColumn = 'epoch' | 't' | 'time' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'count' | 'dc' | 'docs.count' | 'docsCount' | string + +export type CatCatCountColumns = CatCatCountColumn | CatCatCountColumn[] export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' @@ -7089,6 +10298,10 @@ export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] +export type CatCatFieldDataColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'field' | 'f' | 'size' | 's' | string + +export type CatCatFieldDataColumns = CatCatFieldDataColumn | 
CatCatFieldDataColumn[] + export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 
'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] @@ -7125,78 +10338,208 @@ export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'ch export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] export interface CatAliasesAliasesRecord { + /** alias name */ alias?: string + /** alias name + * @alias alias */ a?: string + /** index alias points to */ index?: IndexName + /** index alias points to + * @alias index */ i?: IndexName + /** index alias points to + * @alias index */ idx?: IndexName + /** filter */ filter?: string + /** filter + * @alias filter */ f?: string + /** filter + * @alias filter */ fi?: string + /** index routing */ 'routing.index'?: string + /** index routing + * @alias 'routing.index' */ ri?: string + /** index routing + * @alias 'routing.index' */ routingIndex?: string + /** search routing */ 'routing.search'?: string + /** search routing + * @alias 'routing.search' */ rs?: string + /** search routing + * @alias 'routing.search' */ routingSearch?: string + /** write index */ is_write_index?: string + /** write index + * @alias is_write_index */ w?: string + /** write index + * @alias is_write_index */ isWriteIndex?: string } export interface CatAliasesRequest extends CatCatRequestBase { + /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatAliasesColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. 
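// Example (illustrative, not part of the diff): the CatCatAliasesColumn union
// above feeds the now-typed `h` parameter; short forms like 'a' and 'i' are
// interchangeable with the long names. Assumes the common cat `format`
// parameter to get records instead of the text table.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const aliases = await client.cat.aliases({
  h: ['alias', 'index', 'is_write_index'],
  s: ['alias:asc'], // sort ascending by alias name
  format: 'json'
})
for (const row of aliases) {
  console.log(row.alias, row.index, row.is_write_index)
}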
*/ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, local?: never } } export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { + /** Number of primary and replica shards assigned to the node. */ shards?: string + /** Number of primary and replica shards assigned to the node. + * @alias shards */ s?: string + /** Amount of shards that are scheduled to be moved elsewhere in the cluster or -1 other than desired balance allocator is used */ 'shards.undesired'?: string | null + /** Sum of index write load forecasts */ 'write_load.forecast'?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ wlf?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ writeLoadForecast?: SpecUtilsStringified | null + /** Sum of shard size forecasts */ 'disk.indices.forecast'?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ dif?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ diskIndicesForecast?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. */ 'disk.indices'?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ di?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ diskIndices?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. */ 'disk.used'?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ du?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ diskUsed?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. 
+ * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. */ 'disk.avail'?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ da?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ diskAvail?: ByteSize | null + /** Total disk space for the node, including in-use and available space. */ 'disk.total'?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ dt?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ diskTotal?: ByteSize | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. */ 'disk.percent'?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ dp?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ diskPercent?: Percentage | null + /** Network host for the node. Set using the `network.host` setting. */ host?: Host | null + /** Network host for the node. Set using the `network.host` setting. + * @alias host */ h?: Host | null + /** IP address and port for the node. */ ip?: Ip | null + /** Name for the node. Set using the `node.name` setting. */ node?: string + /** Name for the node. Set using the `node.name` setting. + * @alias node */ n?: string + /** Node roles */ 'node.role'?: string | null + /** Node roles + * @alias 'node.role' */ r?: string | null + /** Node roles + * @alias 'node.role' */ role?: string | null + /** Node roles + * @alias 'node.role' */ nodeRole?: string | null } export interface CatAllocationRequest extends CatCatRequestBase { + /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds + /** The unit used to display byte values. */ bytes?: Bytes - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatAllocationColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
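// Example (illustrative, not part of the diff): reading per-node disk usage
// with the allocation columns typed above; `bytes` picks the display unit.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const allocation = await client.cat.allocation({
  bytes: 'gb',
  h: ['node', 'shards', 'disk.used', 'disk.avail', 'disk.percent'],
  format: 'json'
})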
*/ + querystring?: { [key: string]: any } & { node_id?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatAllocationResponse = CatAllocationAllocationRecord[] @@ -7212,1845 +10555,4763 @@ export interface CatComponentTemplatesComponentTemplate { } export interface CatComponentTemplatesRequest extends CatCatRequestBase { + /** The name of the component template. + * It accepts wildcard expressions. + * If it is omitted, all component templates are returned. */ name?: string - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatComponentColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** The period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] export interface CatCountCountRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ t?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** the document count */ count?: string + /** the document count + * @alias count */ dc?: string + /** the document count + * @alias count */ 'docs.count'?: string + /** the document count + * @alias count */ docsCount?: string } export interface CatCountRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * It supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatCountColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. 
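// Example (illustrative, not part of the diff): document counts via the count
// columns typed above. The index pattern is invented.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const counts = await client.cat.count({
  index: 'logs-*',
  h: ['timestamp', 'count'],
  format: 'json'
})
console.log(counts[0]?.count)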
*/ + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never } } export type CatCountResponse = CatCountCountRecord[] export interface CatFielddataFielddataRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string + /** field name */ field?: string + /** field name + * @alias field */ f?: string + /** field data usage */ size?: string } export interface CatFielddataRequest extends CatCatRequestBase { + /** Comma-separated list of fields used to limit returned information. + * To retrieve all fields, omit this parameter. */ fields?: Fields + /** The unit used to display byte values. */ bytes?: Bytes - h?: Names + /** A comma-separated list of columns names to display. It supports simple wildcards. */ + h?: CatCatFieldDataColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } } export type CatFielddataResponse = CatFielddataFielddataRecord[] export interface CatHealthHealthRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** cluster name */ cluster?: string + /** cluster name + * @alias cluster */ cl?: string + /** health status */ status?: string + /** health status + * @alias status */ st?: string + /** total number of nodes */ 'node.total'?: string + /** total number of nodes + * @alias 'node.total' */ nt?: string + /** total number of nodes + * @alias 'node.total' */ nodeTotal?: string + /** number of nodes that can store data */ 'node.data'?: string + /** number of nodes that can store data + * @alias 'node.data' */ nd?: string + /** number of nodes that can store data + * @alias 'node.data' */ nodeData?: string + /** total number of shards */ shards?: string + /** total number of shards + * @alias shards */ t?: string + /** total number of shards + * @alias shards */ sh?: string + /** total number of shards + * @alias shards */ 'shards.total'?: string + /** total number of shards + * @alias shards */ shardsTotal?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of relocating nodes */ relo?: string + /** number of relocating nodes + * @alias relo */ r?: string + /** number of relocating nodes + * @alias relo */ 'shards.relocating'?: string + /** number of relocating nodes + * @alias relo */ shardsRelocating?: string + /** number of initializing nodes */ init?: string + /** number of initializing nodes + * @alias init */ i?: string + /** 
number of initializing shards + * @alias init */ 'shards.initializing'?: string + /** number of initializing shards + * @alias init */ shardsInitializing?: string + /** number of unassigned primary shards */ 'unassign.pri'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ up?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ 'shards.unassigned.primary'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ shardsUnassignedPrimary?: string + /** number of unassigned shards */ unassign?: string + /** number of unassigned shards + * @alias unassign */ u?: string + /** number of unassigned shards + * @alias unassign */ 'shards.unassigned'?: string + /** number of unassigned shards + * @alias unassign */ shardsUnassigned?: string + /** number of pending tasks */ pending_tasks?: string + /** number of pending tasks + * @alias pending_tasks */ pt?: string + /** number of pending tasks + * @alias pending_tasks */ pendingTasks?: string + /** wait time of longest task pending */ max_task_wait_time?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ mtwt?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ maxTaskWaitTime?: string + /** active number of shards in percent */ active_shards_percent?: string + /** active number of shards in percent + * @alias active_shards_percent */ asp?: string + /** active number of shards in percent + * @alias active_shards_percent */ activeShardsPercent?: string } export interface CatHealthRequest extends CatCatRequestBase { + /** The unit used to display time values. */ time?: TimeUnit + /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } } export type CatHealthResponse = CatHealthHealthRecord[] export interface CatHelpRequest { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. 
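+ * + * A usage sketch, not part of the generated code: it assumes an already-instantiated `client`; `cat.help` takes no required parameters. + * @example + * // returns a plain-text listing of the available cat APIs + * const help = await client.cat.help()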
*/ + querystring?: { [key: string]: any } } export interface CatHelpResponse { } export interface CatIndicesIndicesRecord { + /** current health status */ health?: string + /** current health status + * @alias health */ h?: string + /** open/close status */ status?: string + /** open/close status + * @alias status */ s?: string + /** index name */ index?: string + /** index name + * @alias index */ i?: string + /** index name + * @alias index */ idx?: string + /** index uuid */ uuid?: string + /** index uuid + * @alias uuid */ id?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of replica shards */ rep?: string + /** number of replica shards + * @alias rep */ r?: string + /** number of replica shards + * @alias rep */ 'shards.replica'?: string + /** number of replica shards + * @alias rep */ shardsReplica?: string + /** available docs */ 'docs.count'?: string | null + /** available docs + * @alias 'docs.count' */ dc?: string | null + /** available docs + * @alias 'docs.count' */ docsCount?: string | null + /** deleted docs */ 'docs.deleted'?: string | null + /** deleted docs + * @alias 'docs.deleted' */ dd?: string | null + /** deleted docs + * @alias 'docs.deleted' */ docsDeleted?: string | null + /** index creation date (millisecond value) */ 'creation.date'?: string + /** index creation date (millisecond value) + * @alias 'creation.date' */ cd?: string + /** index creation date (as string) */ 'creation.date.string'?: string + /** index creation date (as string) + * @alias 'creation.date.string' */ cds?: string + /** store size of primaries & replicas */ 'store.size'?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ ss?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ storeSize?: string | null + /** store size of primaries */ 'pri.store.size'?: string | null + /** total size of dataset (including the cache for partially mounted indices) */ 'dataset.size'?: string | null + /** size of completion */ 'completion.size'?: string + /** size of completion + * @alias 'completion.size' */ cs?: string + /** size of completion + * @alias 'completion.size' */ completionSize?: string + /** size of completion */ 'pri.completion.size'?: string + /** used fielddata cache */ 'fielddata.memory_size'?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fm?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** used fielddata cache */ 'pri.fielddata.memory_size'?: string + /** fielddata evictions */ 'fielddata.evictions'?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fe?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** fielddata evictions */ 'pri.fielddata.evictions'?: string + /** used query cache */ 'query_cache.memory_size'?: string + /** used query cache + * @alias 'query_cache.memory_size' */ qcm?: string + /** used query cache + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** used query cache */ 'pri.query_cache.memory_size'?: string + /** query cache evictions */ 'query_cache.evictions'?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ qce?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ 
queryCacheEvictions?: string + /** query cache evictions */ 'pri.query_cache.evictions'?: string + /** used request cache */ 'request_cache.memory_size'?: string + /** used request cache + * @alias 'request_cache.memory_size' */ rcm?: string + /** used request cache + * @alias 'request_cache.memory_size' */ requestCacheMemory?: string + /** used request cache */ 'pri.request_cache.memory_size'?: string + /** request cache evictions */ 'request_cache.evictions'?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ rce?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ requestCacheEvictions?: string + /** request cache evictions */ 'pri.request_cache.evictions'?: string + /** request cache hit count */ 'request_cache.hit_count'?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ rchc?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** request cache hit count */ 'pri.request_cache.hit_count'?: string + /** request cache miss count */ 'request_cache.miss_count'?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ rcmc?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** request cache miss count */ 'pri.request_cache.miss_count'?: string + /** number of flushes */ 'flush.total'?: string + /** number of flushes + * @alias 'flush.total' */ ft?: string + /** number of flushes + * @alias 'flush.total' */ flushTotal?: string + /** number of flushes */ 'pri.flush.total'?: string + /** time spent in flush */ 'flush.total_time'?: string + /** time spent in flush + * @alias 'flush.total_time' */ ftt?: string + /** time spent in flush + * @alias 'flush.total_time' */ flushTotalTime?: string + /** time spent in flush */ 'pri.flush.total_time'?: string + /** number of current get ops */ 'get.current'?: string + /** number of current get ops + * @alias 'get.current' */ gc?: string + /** number of current get ops + * @alias 'get.current' */ getCurrent?: string + /** number of current get ops */ 'pri.get.current'?: string + /** time spent in get */ 'get.time'?: string + /** time spent in get + * @alias 'get.time' */ gti?: string + /** time spent in get + * @alias 'get.time' */ getTime?: string + /** time spent in get */ 'pri.get.time'?: string + /** number of get ops */ 'get.total'?: string + /** number of get ops + * @alias 'get.total' */ gto?: string + /** number of get ops + * @alias 'get.total' */ getTotal?: string + /** number of get ops */ 'pri.get.total'?: string + /** time spent in successful gets */ 'get.exists_time'?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ geti?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ getExistsTime?: string + /** time spent in successful gets */ 'pri.get.exists_time'?: string + /** number of successful gets */ 'get.exists_total'?: string + /** number of successful gets + * @alias 'get.exists_total' */ geto?: string + /** number of successful gets + * @alias 'get.exists_total' */ getExistsTotal?: string + /** number of successful gets */ 'pri.get.exists_total'?: string + /** time spent in failed gets */ 'get.missing_time'?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ gmti?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ getMissingTime?: string + /** time spent in failed gets */ 'pri.get.missing_time'?: string + /** number of 
failed gets */ 'get.missing_total'?: string + /** number of failed gets + * @alias 'get.missing_total' */ gmto?: string + /** number of failed gets + * @alias 'get.missing_total' */ getMissingTotal?: string + /** number of failed gets */ 'pri.get.missing_total'?: string + /** number of current deletions */ 'indexing.delete_current'?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ idc?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** number of current deletions */ 'pri.indexing.delete_current'?: string + /** time spent in deletions */ 'indexing.delete_time'?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ idti?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** time spent in deletions */ 'pri.indexing.delete_time'?: string + /** number of delete ops */ 'indexing.delete_total'?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ idto?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** number of delete ops */ 'pri.indexing.delete_total'?: string + /** number of current indexing ops */ 'indexing.index_current'?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ iic?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** number of current indexing ops */ 'pri.indexing.index_current'?: string + /** time spent in indexing */ 'indexing.index_time'?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ iiti?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** time spent in indexing */ 'pri.indexing.index_time'?: string + /** number of indexing ops */ 'indexing.index_total'?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ iito?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** number of indexing ops */ 'pri.indexing.index_total'?: string + /** number of failed indexing ops */ 'indexing.index_failed'?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ iif?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** number of failed indexing ops */ 'pri.indexing.index_failed'?: string + /** number of current merges */ 'merges.current'?: string + /** number of current merges + * @alias 'merges.current' */ mc?: string + /** number of current merges + * @alias 'merges.current' */ mergesCurrent?: string + /** number of current merges */ 'pri.merges.current'?: string + /** number of current merging docs */ 'merges.current_docs'?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mcd?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** number of current merging docs */ 'pri.merges.current_docs'?: string + /** size of current merges */ 'merges.current_size'?: string + /** size of current merges + * @alias 'merges.current_size' */ mcs?: string + /** size of current merges + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** size of current merges */ 'pri.merges.current_size'?: string + /** number of completed merge ops */ 'merges.total'?: string + /** number of completed merge ops + * @alias 
'merges.total' */ mt?: string + /** number of completed merge ops + * @alias 'merges.total' */ mergesTotal?: string + /** number of completed merge ops */ 'pri.merges.total'?: string + /** docs merged */ 'merges.total_docs'?: string + /** docs merged + * @alias 'merges.total_docs' */ mtd?: string + /** docs merged + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** docs merged */ 'pri.merges.total_docs'?: string + /** size merged */ 'merges.total_size'?: string + /** size merged + * @alias 'merges.total_size' */ mts?: string + /** size merged + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** size merged */ 'pri.merges.total_size'?: string + /** time spent in merges */ 'merges.total_time'?: string + /** time spent in merges + * @alias 'merges.total_time' */ mtt?: string + /** time spent in merges + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** time spent in merges */ 'pri.merges.total_time'?: string + /** total refreshes */ 'refresh.total'?: string + /** total refreshes + * @alias 'refresh.total' */ rto?: string + /** total refreshes + * @alias 'refresh.total' */ refreshTotal?: string + /** total refreshes */ 'pri.refresh.total'?: string + /** time spent in refreshes */ 'refresh.time'?: string + /** time spent in refreshes + * @alias 'refresh.time' */ rti?: string + /** time spent in refreshes + * @alias 'refresh.time' */ refreshTime?: string + /** time spent in refreshes */ 'pri.refresh.time'?: string + /** total external refreshes */ 'refresh.external_total'?: string + /** total external refreshes + * @alias 'refresh.external_total' */ reto?: string + /** total external refreshes */ 'pri.refresh.external_total'?: string + /** time spent in external refreshes */ 'refresh.external_time'?: string + /** time spent in external refreshes + * @alias 'refresh.external_time' */ reti?: string + /** time spent in external refreshes */ 'pri.refresh.external_time'?: string + /** number of pending refresh listeners */ 'refresh.listeners'?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ rli?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ refreshListeners?: string + /** number of pending refresh listeners */ 'pri.refresh.listeners'?: string + /** current fetch phase ops */ 'search.fetch_current'?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ sfc?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** current fetch phase ops */ 'pri.search.fetch_current'?: string + /** time spent in fetch phase */ 'search.fetch_time'?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ sfti?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** time spent in fetch phase */ 'pri.search.fetch_time'?: string + /** total fetch ops */ 'search.fetch_total'?: string + /** total fetch ops + * @alias 'search.fetch_total' */ sfto?: string + /** total fetch ops + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** total fetch ops */ 'pri.search.fetch_total'?: string + /** open search contexts */ 'search.open_contexts'?: string + /** open search contexts + * @alias 'search.open_contexts' */ so?: string + /** open search contexts + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** open search contexts */ 'pri.search.open_contexts'?: string + /** current query phase ops */ 'search.query_current'?: string + /** current query phase ops + 
* @alias 'search.query_current' */ sqc?: string + /** current query phase ops + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** current query phase ops */ 'pri.search.query_current'?: string + /** time spent in query phase */ 'search.query_time'?: string + /** time spent in query phase + * @alias 'search.query_time' */ sqti?: string + /** time spent in query phase + * @alias 'search.query_time' */ searchQueryTime?: string + /** time spent in query phase */ 'pri.search.query_time'?: string + /** total query phase ops */ 'search.query_total'?: string + /** total query phase ops + * @alias 'search.query_total' */ sqto?: string + /** total query phase ops + * @alias 'search.query_total' */ searchQueryTotal?: string + /** total query phase ops */ 'pri.search.query_total'?: string + /** open scroll contexts */ 'search.scroll_current'?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ scc?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** open scroll contexts */ 'pri.search.scroll_current'?: string + /** time scroll contexts held open */ 'search.scroll_time'?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ scti?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** time scroll contexts held open */ 'pri.search.scroll_time'?: string + /** completed scroll contexts */ 'search.scroll_total'?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ scto?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** completed scroll contexts */ 'pri.search.scroll_total'?: string + /** number of segments */ 'segments.count'?: string + /** number of segments + * @alias 'segments.count' */ sc?: string + /** number of segments + * @alias 'segments.count' */ segmentsCount?: string + /** number of segments */ 'pri.segments.count'?: string + /** memory used by segments */ 'segments.memory'?: string + /** memory used by segments + * @alias 'segments.memory' */ sm?: string + /** memory used by segments + * @alias 'segments.memory' */ segmentsMemory?: string + /** memory used by segments */ 'pri.segments.memory'?: string + /** memory used by index writer */ 'segments.index_writer_memory'?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ siwm?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** memory used by index writer */ 'pri.segments.index_writer_memory'?: string + /** memory used by version map */ 'segments.version_map_memory'?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ svmm?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** memory used by version map */ 'pri.segments.version_map_memory'?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields */ 'segments.fixed_bitset_memory'?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ 
fixedBitsetMemory?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields */ 'pri.segments.fixed_bitset_memory'?: string + /** current warmer ops */ 'warmer.current'?: string + /** current warmer ops + * @alias 'warmer.current' */ wc?: string + /** current warmer ops + * @alias 'warmer.current' */ warmerCurrent?: string + /** current warmer ops */ 'pri.warmer.current'?: string + /** total warmer ops */ 'warmer.total'?: string + /** total warmer ops + * @alias 'warmer.total' */ wto?: string + /** total warmer ops + * @alias 'warmer.total' */ warmerTotal?: string + /** total warmer ops */ 'pri.warmer.total'?: string + /** time spent in warmers */ 'warmer.total_time'?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ wtt?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** time spent in warmers */ 'pri.warmer.total_time'?: string + /** number of current suggest ops */ 'suggest.current'?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suc?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suggestCurrent?: string + /** number of current suggest ops */ 'pri.suggest.current'?: string + /** time spent in suggest */ 'suggest.time'?: string + /** time spent in suggest + * @alias 'suggest.time' */ suti?: string + /** time spent in suggest + * @alias 'suggest.time' */ suggestTime?: string + /** time spent in suggest */ 'pri.suggest.time'?: string + /** number of suggest ops */ 'suggest.total'?: string + /** number of suggest ops + * @alias 'suggest.total' */ suto?: string + /** number of suggest ops + * @alias 'suggest.total' */ suggestTotal?: string + /** number of suggest ops */ 'pri.suggest.total'?: string + /** total used memory */ 'memory.total'?: string + /** total used memory + * @alias 'memory.total' */ tm?: string + /** total used memory + * @alias 'memory.total' */ memoryTotal?: string + /** total used memory */ 'pri.memory.total'?: string + /** indicates if the index is search throttled */ 'search.throttled'?: string + /** indicates if the index is search throttled + * @alias 'search.throttled' */ sth?: string + /** number of bulk shard ops */ 'bulk.total_operations'?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ bto?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ bulkTotalOperation?: string + /** number of bulk shard ops */ 'pri.bulk.total_operations'?: string + /** time spent in shard bulk */ 'bulk.total_time'?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ btti?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** time spent in shard bulk */ 'pri.bulk.total_time'?: string + /** total size in bytes of shard bulk */ 'bulk.total_size_in_bytes'?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** total size in bytes of shard bulk */ 'pri.bulk.total_size_in_bytes'?: string + /** average time spent in shard bulk */ 'bulk.avg_time'?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ bati?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** average time spent in shard bulk */ 'pri.bulk.avg_time'?: string + /** average size in 
bytes of shard bulk */ 'bulk.avg_size_in_bytes'?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string + /** average size in bytes of shard bulk */ 'pri.bulk.avg_size_in_bytes'?: string } export interface CatIndicesRequest extends CatCatRequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** The unit used to display byte values. */ bytes?: Bytes + /** The type of index that wildcard patterns can match. */ expand_wildcards?: ExpandWildcards + /** The health status used to limit returned indices. By default, the response includes indices of any health status. */ health?: HealthStatus + /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** If true, the response only includes information from primary shards. */ pri?: boolean + /** The unit used to display time values. */ time?: TimeUnit + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } } export type CatIndicesResponse = CatIndicesIndicesRecord[] export interface CatMasterMasterRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string } export interface CatMasterRequest extends CatCatRequestBase { + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false`, the list of selected nodes is computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
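+ * + * A usage sketch, not part of the generated code: the column selection is illustrative and an already-instantiated `client` is assumed. + * @example + * // pick columns with `h` and bound the master lookup with `master_timeout` + * const master = await client.cat.master({ h: ['id', 'host', 'ip', 'node'], master_timeout: '30s' })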
*/ + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } } export type CatMasterResponse = CatMasterMasterRecord[] export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + /** The identifier for the job. */ id?: Id + /** The type of analysis that the job performs. */ type?: string + /** The type of analysis that the job performs. + * @alias type */ t?: string + /** The time when the job was created. */ create_time?: string + /** The time when the job was created. + * @alias create_time */ ct?: string + /** The time when the job was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch when the job was created. */ version?: VersionString + /** The version of Elasticsearch when the job was created. + * @alias version */ v?: VersionString + /** The name of the source index. */ source_index?: IndexName + /** The name of the source index. + * @alias source_index */ si?: IndexName + /** The name of the source index. + * @alias source_index */ sourceIndex?: IndexName + /** The name of the destination index. */ dest_index?: IndexName + /** The name of the destination index. + * @alias dest_index */ di?: IndexName + /** The name of the destination index. + * @alias dest_index */ destIndex?: IndexName + /** A description of the job. */ description?: string + /** A description of the job. + * @alias description */ d?: string + /** The approximate maximum amount of memory resources that are permitted for the job. */ model_memory_limit?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ mml?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ modelMemoryLimit?: string + /** The current status of the job. */ state?: string + /** The current status of the job. + * @alias state */ s?: string + /** Messages about the reason why the job failed. */ failure_reason?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ fr?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ failureReason?: string + /** The progress report for the job by phase. */ progress?: string + /** The progress report for the job by phase. + * @alias progress */ p?: string + /** Messages related to the selection of a node. */ assignment_explanation?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ ae?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ assignmentExplanation?: string + /** The unique identifier of the assigned node. */ 'node.id'?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ ni?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ nodeId?: Id + /** The name of the assigned node. */ 'node.name'?: Name + /** The name of the assigned node. + * @alias 'node.name' */ nn?: Name + /** The name of the assigned node. + * @alias 'node.name' */ nodeName?: Name + /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ ne?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: Id + /** The network address of the assigned node. */ 'node.address'?: string + /** The network address of the assigned node. 
+ * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ nodeAddress?: string } export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { + /** The ID of the data frame analytics to fetch. */ id?: Id + /** Whether to ignore if a wildcard expression matches no configs. (This includes the `_all` string or when no configs have been specified.) */ allow_no_match?: boolean + /** The unit in which to display byte values. */ bytes?: Bytes + /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns + /** Comma-separated list of column names or column aliases used to sort the + * response. */ s?: CatCatDfaColumns + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } } export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] export interface CatMlDatafeedsDatafeedsRecord { + /** The datafeed identifier. */ id?: string + /** The status of the datafeed. */ state?: MlDatafeedState + /** The status of the datafeed. + * @alias state */ s?: MlDatafeedState + /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string + /** For started datafeeds only, contains messages relating to the selection of a node. + * @alias assignment_explanation */ ae?: string + /** The number of buckets processed. */ 'buckets.count'?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ bc?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ bucketsCount?: string + /** The number of searches run by the datafeed. */ 'search.count'?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ sc?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ searchCount?: string + /** The total time the datafeed spent searching, in milliseconds. */ 'search.time'?: string + /** The total time the datafeed spent searching, in milliseconds. + * @alias 'search.time' */ st?: string + /** The total time the datafeed spent searching, in milliseconds. + * @alias 'search.time' */ searchTime?: string + /** The average search time per bucket, in milliseconds. */ 'search.bucket_avg'?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ sba?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ searchBucketAvg?: string + /** The exponential average search time per hour, in milliseconds. */ 'search.exp_avg_hour'?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ seah?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ searchExpAvgHour?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.id'?: string + /** The unique identifier of the assigned node. 
+ * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ ni?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ nodeId?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.name'?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nn?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nodeName?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.ephemeral_id'?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ ne?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.address'?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ nodeAddress?: string } export interface CatMlDatafeedsRequest extends CatCatRequestBase { + /** A numerical character string that uniquely identifies the datafeed. */ datafeed_id?: Id + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no datafeeds that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + * there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only + * partial matches. */ allow_no_match?: boolean + /** Comma-separated list of column names to display. */ h?: CatCatDatafeedColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatDatafeedColumns + /** The unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. 
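+ * + * A usage sketch, not part of the generated code: the datafeed ID is hypothetical and an already-instantiated `client` is assumed. + * @example + * // 'my-datafeed' is a hypothetical ID; allow_no_match avoids a 404 when nothing matches + * const datafeeds = await client.cat.mlDatafeeds({ datafeed_id: 'my-datafeed', allow_no_match: true, time: 'ms' })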
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never, time?: never } } export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] export interface CatMlJobsJobsRecord { + /** The anomaly detection job identifier. */ id?: Id + /** The status of the anomaly detection job. */ state?: MlJobState + /** The status of the anomaly detection job. + * @alias state */ s?: MlJobState + /** For open jobs only, the amount of time the job has been opened. */ opened_time?: string + /** For open jobs only, the amount of time the job has been opened. + * @alias opened_time */ ot?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. + * @alias assignment_explanation */ ae?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. */ 'data.processed_records'?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dpr?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dataProcessedRecords?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. */ 'data.processed_fields'?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dpf?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dataProcessedFields?: string + /** The number of bytes of input data posted to the anomaly detection job. */ 'data.input_bytes'?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ dib?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. 
+ * @alias 'data.input_bytes' */ dataInputBytes?: ByteSize + /** The number of input documents posted to the anomaly detection job. */ 'data.input_records'?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dir?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dataInputRecords?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. */ 'data.input_fields'?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dif?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dataInputFields?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. */ 'data.invalid_dates'?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ did?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ dataInvalidDates?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. */ 'data.missing_fields'?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dmf?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dataMissingFields?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. 
+ * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. */ 'data.out_of_order_timestamps'?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ doot?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ dataOutOfOrderTimestamps?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. */ 'data.empty_buckets'?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ deb?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ dataEmptyBuckets?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. */ 'data.sparse_buckets'?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ dsb?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ dataSparseBuckets?: string + /** The total number of buckets processed. */ 'data.buckets'?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ db?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ dataBuckets?: string + /** The timestamp of the earliest chronologically input document. */ 'data.earliest_record'?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ der?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ dataEarliestRecord?: string + /** The timestamp of the latest chronologically input document. 
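+ * + * A usage sketch, not part of the generated code, showing how this column can be requested by name; an already-instantiated `client` is assumed. + * @example + * // request the raw column id alongside the job id and state + * const jobs = await client.cat.mlJobs({ h: ['id', 'state', 'data.latest_record'] })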
*/ 'data.latest_record'?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dlr?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dataLatestRecord?: string + /** The timestamp at which data was last analyzed, according to server time. */ 'data.last'?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dl?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dataLast?: string + /** The timestamp of the last bucket that did not contain any data. */ 'data.last_empty_bucket'?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dleb?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dataLastEmptyBucket?: string + /** The timestamp of the last bucket that was considered sparse. */ 'data.last_sparse_bucket'?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dlsb?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dataLastSparseBucket?: string + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. */ 'model.bytes'?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ mb?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ modelBytes?: ByteSize + /** The status of the mathematical models. */ 'model.memory_status'?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ mms?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ modelMemoryStatus?: MlMemoryStatus + /** The number of bytes over the high limit for memory usage at the last allocation failure. */ 'model.bytes_exceeded'?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ mbe?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ modelBytesExceeded?: ByteSize + /** The upper limit for model memory usage, checked on increasing values. */ 'model.memory_limit'?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ mml?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ modelMemoryLimit?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.by_fields'?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. 
+ * @alias 'model.by_fields' */ mbf?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ modelByFields?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.over_fields'?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ mof?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ modelOverFields?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.partition_fields'?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ mpf?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ modelPartitionFields?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. */ 'model.bucket_allocation_failures'?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ mbaf?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ modelBucketAllocationFailures?: string + /** The status of categorization for the job. */ 'model.categorization_status'?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ mcs?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ modelCategorizationStatus?: MlCategorizationStatus + /** The number of documents that have had a field categorized. */ 'model.categorized_doc_count'?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ mcdc?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ modelCategorizedDocCount?: string + /** The number of categories created by categorization. */ 'model.total_category_count'?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ mtcc?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ modelTotalCategoryCount?: string + /** The number of categories that match more than 1% of categorized documents. */ 'model.frequent_category_count'?: string + /** The number of categories that match more than 1% of categorized documents. 
+ * @alias 'model.frequent_category_count' */ modelFrequentCategoryCount?: string + /** The number of categories that match just one categorized document. */ 'model.rare_category_count'?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ mrcc?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ modelRareCategoryCount?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. */ 'model.dead_category_count'?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ mdcc?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ modelDeadCategoryCount?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. */ 'model.failed_category_count'?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ mfcc?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ modelFailedCategoryCount?: string + /** The timestamp when the model stats were gathered, according to server time. */ 'model.log_time'?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ mlt?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ modelLogTime?: string + /** The timestamp of the last record when the model stats were gathered. */ 'model.timestamp'?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ mt?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ modelTimestamp?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. */ 'forecasts.total'?: string + /** The number of individual forecasts currently available for the job. 
+ * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ ft?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ forecastsTotal?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.min'?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ fmmin?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ forecastsMemoryMin?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.max'?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ fmmax?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ forecastsMemoryMax?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.avg'?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ fmavg?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ forecastsMemoryAvg?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.total'?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ fmt?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ forecastsMemoryTotal?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.min'?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ frmin?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ forecastsRecordsMin?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.max'?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ frmax?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ forecastsRecordsMax?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.avg'?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.avg' */ fravg?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. 
+ * @alias 'forecasts.records.avg' */ forecastsRecordsAvg?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.total'?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.total' */ frt?: string
+ /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.records.total' */ forecastsRecordsTotal?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.min'?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.min' */ ftmin?: string
+ /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.min' */ forecastsTimeMin?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.max'?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.max' */ ftmax?: string
+ /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.max' */ forecastsTimeMax?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.avg'?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.avg' */ ftavg?: string
+ /** The average runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.avg' */ forecastsTimeAvg?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.total'?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.total' */ ftt?: string
+ /** The total runtime in milliseconds for forecasts related to the anomaly detection job.
+ * @alias 'forecasts.time.total' */ forecastsTimeTotal?: string
+ /** The unique identifier of the assigned node. */ 'node.id'?: NodeId
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */ ni?: NodeId
+ /** The unique identifier of the assigned node.
+ * @alias 'node.id' */ nodeId?: NodeId
+ /** The name of the assigned node. */ 'node.name'?: string
+ /** The name of the assigned node.
+ * @alias 'node.name' */ nn?: string
+ /** The name of the assigned node.
+ * @alias 'node.name' */ nodeName?: string
+ /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: NodeId
+ /** The ephemeral identifier of the assigned node.
+ * @alias 'node.ephemeral_id' */ ne?: NodeId
+ /** The ephemeral identifier of the assigned node.
+ * @alias 'node.ephemeral_id' */ nodeEphemeralId?: NodeId
+ /** The network address of the assigned node. */ 'node.address'?: string
+ /** The network address of the assigned node.
+ * @alias 'node.address' */ na?: string
+ /** The network address of the assigned node.
+ * @alias 'node.address' */ nodeAddress?: string
+ /** The number of bucket results produced by the job. */ 'buckets.count'?: string
+ /** The number of bucket results produced by the job.
+ * @alias 'buckets.count' */ bc?: string
+ /** The number of bucket results produced by the job.
+ * @alias 'buckets.count' */ bucketsCount?: string + /** The sum of all bucket processing times, in milliseconds. */ 'buckets.time.total'?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ btt?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ bucketsTimeTotal?: string + /** The minimum of all bucket processing times, in milliseconds. */ 'buckets.time.min'?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ btmin?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ bucketsTimeMin?: string + /** The maximum of all bucket processing times, in milliseconds. */ 'buckets.time.max'?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ btmax?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ bucketsTimeMax?: string + /** The exponential moving average of all bucket processing times, in milliseconds. */ 'buckets.time.exp_avg'?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ btea?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ bucketsTimeExpAvg?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. */ 'buckets.time.exp_avg_hour'?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bteah?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bucketsTimeExpAvgHour?: string } export interface CatMlJobsRequest extends CatCatRequestBase { + /** Identifier for the anomaly detection job. */ job_id?: Id + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no jobs that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there + * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial + * matches. */ allow_no_match?: boolean + /** The unit used to display byte values. */ bytes?: Bytes - h?: CatCatAnonalyDetectorColumns - s?: CatCatAnonalyDetectorColumns + /** Comma-separated list of column names to display. */ + h?: CatCatAnomalyDetectorColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ + s?: CatCatAnomalyDetectorColumns + /** The unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. 
*/
+ querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, time?: never }
}
export type CatMlJobsResponse = CatMlJobsJobsRecord[]
export interface CatMlTrainedModelsRequest extends CatCatRequestBase {
+ /** A unique identifier for the trained model. */ model_id?: Id
+ /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+ * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.
+ * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean
+ /** The unit used to display byte values. */ bytes?: Bytes
+ /** A comma-separated list of column names to display. */ h?: CatCatTrainedModelsColumns
+ /** A comma-separated list of column names or aliases used to sort the response. */ s?: CatCatTrainedModelsColumns
+ /** Skips the specified number of trained models. */ from?: integer
+ /** The maximum number of trained models to display. */ size?: integer
+ /** Unit used to display time values. */ time?: TimeUnit
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, bytes?: never, h?: never, s?: never, from?: never, size?: never, time?: never }
}
export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[]
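+ // A minimal usage sketch for these request/response types (illustrative, not part
+ // of the generated spec): `h` narrows the returned columns and `s` sorts by a
+ // column name or alias; the node URL and the column choices are assumptions.
+ //
+ //   import { Client } from '@elastic/elasticsearch'
+ //   const client = new Client({ node: 'http://localhost:9200' })
+ //   const models: CatMlTrainedModelsResponse = await client.cat.mlTrainedModels({
+ //     h: ['id', 'operations', 'create_time'],
+ //     s: 'create_time'
+ //   })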
export interface CatMlTrainedModelsTrainedModelsRecord {
+ /** The model identifier. */ id?: Id
+ /** Information about the creator of the model. */ created_by?: string
+ /** Information about the creator of the model.
+ * @alias created_by */ c?: string
+ /** Information about the creator of the model.
+ * @alias created_by */ createdBy?: string
+ /** The estimated heap size to keep the model in memory. */ heap_size?: ByteSize
+ /** The estimated heap size to keep the model in memory.
+ * @alias heap_size */ hs?: ByteSize
+ /** The estimated heap size to keep the model in memory.
+ * @alias heap_size */ modelHeapSize?: ByteSize
+ /** The estimated number of operations to use the model.
+ * This number helps to measure the computational complexity of the model. */ operations?: string
+ /** The estimated number of operations to use the model.
+ * This number helps to measure the computational complexity of the model.
+ * @alias operations */ o?: string
+ /** The estimated number of operations to use the model.
+ * This number helps to measure the computational complexity of the model.
+ * @alias operations */ modelOperations?: string
+ /** The license level of the model. */ license?: string
+ /** The license level of the model.
+ * @alias license */ l?: string
+ /** The time the model was created. */ create_time?: DateTime
+ /** The time the model was created.
+ * @alias create_time */ ct?: DateTime
+ /** The version of Elasticsearch when the model was created. */ version?: VersionString
+ /** The version of Elasticsearch when the model was created.
+ * @alias version */ v?: VersionString
+ /** A description of the model. */ description?: string
+ /** A description of the model.
+ * @alias description */ d?: string
+ /** The number of pipelines that are referencing the model. */ 'ingest.pipelines'?: string
+ /** The number of pipelines that are referencing the model.
+ * @alias 'ingest.pipelines' */ ip?: string
+ /** The number of pipelines that are referencing the model.
+ * @alias 'ingest.pipelines' */ ingestPipelines?: string
+ /** The total number of documents that are processed by the model. */ 'ingest.count'?: string
+ /** The total number of documents that are processed by the model.
+ * @alias 'ingest.count' */ ic?: string
+ /** The total number of documents that are processed by the model.
+ * @alias 'ingest.count' */ ingestCount?: string
+ /** The total time spent processing documents with the model. */ 'ingest.time'?: string
+ /** The total time spent processing documents with the model.
+ * @alias 'ingest.time' */ it?: string
+ /** The total time spent processing documents with the model.
+ * @alias 'ingest.time' */ ingestTime?: string
+ /** The total number of documents that are currently being handled by the model. */ 'ingest.current'?: string
+ /** The total number of documents that are currently being handled by the model.
+ * @alias 'ingest.current' */ icurr?: string
+ /** The total number of documents that are currently being handled by the model.
+ * @alias 'ingest.current' */ ingestCurrent?: string
+ /** The total number of failed ingest attempts with the model. */ 'ingest.failed'?: string
+ /** The total number of failed ingest attempts with the model.
+ * @alias 'ingest.failed' */ if?: string
+ /** The total number of failed ingest attempts with the model.
+ * @alias 'ingest.failed' */ ingestFailed?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available. */ 'data_frame.id'?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available.
+ * @alias 'data_frame.id' */ dfid?: string
+ /** The identifier for the data frame analytics job that created the model.
+ * Only displayed if the job is still available.
+ * @alias 'data_frame.id' */ dataFrameAnalytics?: string
+ /** The time the data frame analytics job was created. */ 'data_frame.create_time'?: string
+ /** The time the data frame analytics job was created.
+ * @alias 'data_frame.create_time' */ dft?: string
+ /** The time the data frame analytics job was created.
+ * @alias 'data_frame.create_time' */ dataFrameAnalyticsTime?: string
+ /** The source index used to train in the data frame analysis. */ 'data_frame.source_index'?: string
+ /** The source index used to train in the data frame analysis.
+ * @alias 'data_frame.source_index' */ dfsi?: string
+ /** The source index used to train in the data frame analysis.
+ * @alias 'data_frame.source_index' */ dataFrameAnalyticsSrcIndex?: string
+ /** The analysis used by the data frame to build the model. */ 'data_frame.analysis'?: string
+ /** The analysis used by the data frame to build the model.
+ * @alias 'data_frame.analysis' */ dfa?: string
+ /** The analysis used by the data frame to build the model.
+ * @alias 'data_frame.analysis' */ dataFrameAnalyticsAnalysis?: string
type?: string
}
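+ // Note on reading these records: a value can arrive under the full dotted column
+ // name or under an alias, depending on which columns were requested, so dotted
+ // keys need bracket notation while camelCase aliases are plain properties. A
+ // sketch, reusing `models` from the sketch above (field choices illustrative):
+ //
+ //   for (const model of models) {
+ //     console.log(model.id, model['ingest.count'] ?? model.ingestCount)
+ //   }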
export interface CatNodeattrsNodeAttributesRecord {
+ /** The node name. */ node?: string
+ /** The unique node identifier. */ id?: string
+ /** The process identifier. */ pid?: string
+ /** The host name. */ host?: string
+ /** The host name.
+ * @alias host */ h?: string
+ /** The IP address. */ ip?: string
+ /** The IP address.
+ * @alias ip */ i?: string
+ /** The bound transport port. */ port?: string
+ /** The attribute name. */ attr?: string
+ /** The attribute value. */ value?: string
}
export interface CatNodeattrsRequest extends CatCatRequestBase {
+ /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false`, the list of selected nodes is computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */ local?: boolean
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never }
}
export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[]
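+ // A sketch of the same pattern for node attributes (column names illustrative;
+ // assumes the `client` instance from the earlier sketch):
+ //
+ //   const attrs: CatNodeattrsResponse = await client.cat.nodeattrs({
+ //     h: ['node', 'attr', 'value'],
+ //     s: 'node'
+ //   })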
export interface CatNodesNodesRecord {
+ /** The unique node identifier. */ id?: Id
+ /** The unique node identifier.
+ * @alias id */ nodeId?: Id
+ /** The process identifier. */ pid?: string
+ /** The process identifier.
+ * @alias pid */ p?: string
+ /** The IP address. */ ip?: string
+ /** The IP address.
+ * @alias ip */ i?: string
+ /** The bound transport port. */ port?: string
+ /** The bound transport port.
+ * @alias port */ po?: string
+ /** The bound HTTP address. */ http_address?: string
+ /** The bound HTTP address.
+ * @alias http_address */ http?: string
+ /** The Elasticsearch version. */ version?: VersionString
+ /** The Elasticsearch version.
+ * @alias version */ v?: VersionString
+ /** The Elasticsearch distribution flavor. */ flavor?: string
+ /** The Elasticsearch distribution flavor.
+ * @alias flavor */ f?: string
+ /** The Elasticsearch distribution type. */ type?: string
+ /** The Elasticsearch distribution type.
+ * @alias type */ t?: string
+ /** The Elasticsearch build hash. */ build?: string
+ /** The Elasticsearch build hash.
+ * @alias build */ b?: string
+ /** The Java version. */ jdk?: string
+ /** The Java version.
+ * @alias jdk */ j?: string
+ /** The total disk space. */ 'disk.total'?: ByteSize
+ /** The total disk space.
+ * @alias 'disk.total' */ dt?: ByteSize
+ /** The total disk space.
+ * @alias 'disk.total' */ diskTotal?: ByteSize
+ /** The used disk space. */ 'disk.used'?: ByteSize
+ /** The used disk space.
+ * @alias 'disk.used' */ du?: ByteSize
+ /** The used disk space.
+ * @alias 'disk.used' */ diskUsed?: ByteSize
+ /** The available disk space. */ 'disk.avail'?: ByteSize
+ /** The available disk space.
+ * @alias 'disk.avail' */ d?: ByteSize
+ /** The available disk space.
+ * @alias 'disk.avail' */ da?: ByteSize
+ /** The available disk space.
+ * @alias 'disk.avail' */ disk?: ByteSize
+ /** The available disk space.
+ * @alias 'disk.avail' */ diskAvail?: ByteSize
+ /** The used disk space percentage. */ 'disk.used_percent'?: Percentage
+ /** The used disk space percentage.
+ * @alias 'disk.used_percent' */ dup?: Percentage
+ /** The used disk space percentage.
+ * @alias 'disk.used_percent' */ diskUsedPercent?: Percentage
+ /** The used heap. */ 'heap.current'?: string
+ /** The used heap.
+ * @alias 'heap.current' */ hc?: string
+ /** The used heap.
+ * @alias 'heap.current' */ heapCurrent?: string
+ /** The used heap ratio. */ 'heap.percent'?: Percentage
+ /** The used heap ratio.
+ * @alias 'heap.percent' */ hp?: Percentage
+ /** The used heap ratio.
+ * @alias 'heap.percent' */ heapPercent?: Percentage
+ /** The maximum configured heap. */ 'heap.max'?: string
+ /** The maximum configured heap.
+ * @alias 'heap.max' */ hm?: string
+ /** The maximum configured heap.
+ * @alias 'heap.max' */ heapMax?: string
+ /** The used machine memory. */ 'ram.current'?: string
+ /** The used machine memory.
+ * @alias 'ram.current' */ rc?: string
+ /** The used machine memory.
+ * @alias 'ram.current' */ ramCurrent?: string
+ /** The used machine memory ratio. */ 'ram.percent'?: Percentage
+ /** The used machine memory ratio.
+ * @alias 'ram.percent' */ rp?: Percentage
+ /** The used machine memory ratio.
+ * @alias 'ram.percent' */ ramPercent?: Percentage
+ /** The total machine memory. */ 'ram.max'?: string
+ /** The total machine memory.
+ * @alias 'ram.max' */ rn?: string
+ /** The total machine memory.
+ * @alias 'ram.max' */ ramMax?: string
+ /** The used file descriptors. */ 'file_desc.current'?: string
+ /** The used file descriptors.
+ * @alias 'file_desc.current' */ fdc?: string
+ /** The used file descriptors.
+ * @alias 'file_desc.current' */ fileDescriptorCurrent?: string
+ /** The used file descriptor ratio. */ 'file_desc.percent'?: Percentage
+ /** The used file descriptor ratio.
+ * @alias 'file_desc.percent' */ fdp?: Percentage
+ /** The used file descriptor ratio.
+ * @alias 'file_desc.percent' */ fileDescriptorPercent?: Percentage
+ /** The maximum number of file descriptors. */ 'file_desc.max'?: string
+ /** The maximum number of file descriptors.
+ * @alias 'file_desc.max' */ fdm?: string
+ /** The maximum number of file descriptors.
+ * @alias 'file_desc.max' */ fileDescriptorMax?: string
+ /** The recent system CPU usage as a percentage. */ cpu?: string
+ /** The load average for the most recent minute. */ load_1m?: string
+ /** The load average for the last five minutes. */ load_5m?: string
+ /** The load average for the last fifteen minutes. */ load_15m?: string
+ /** The load average for the last fifteen minutes.
+ * @alias load_15m */ l?: string
+ /** The node uptime. */ uptime?: string
+ /** The node uptime.
+ * @alias uptime */ u?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). */ 'node.role'?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */ r?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */ role?: string
+ /** The roles of the node.
+ * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only).
+ * @alias 'node.role' */ nodeRole?: string
+ /** Indicates whether the node is the elected master node.
+ * Returned values include `*`(elected master) and `-`(not elected master). */ master?: string
+ /** Indicates whether the node is the elected master node.
+ * Returned values include `*`(elected master) and `-`(not elected master).
+ * @alias master */ m?: string
+ /** The node name. */ name?: Name
+ /** The node name.
+ * @alias name */ n?: Name
+ /** The size of completion. */ 'completion.size'?: string
+ /** The size of completion.
+ * @alias 'completion.size' */ cs?: string
+ /** The size of completion.
+ * @alias 'completion.size' */ completionSize?: string
+ /** The used fielddata cache. */ 'fielddata.memory_size'?: string
+ /** The used fielddata cache.
+ * @alias 'fielddata.memory_size' */ fm?: string
+ /** The used fielddata cache.
+ * @alias 'fielddata.memory_size' */ fielddataMemory?: string
+ /** The fielddata evictions. */ 'fielddata.evictions'?: string
+ /** The fielddata evictions.
+ * @alias 'fielddata.evictions' */ fe?: string
+ /** The fielddata evictions.
+ * @alias 'fielddata.evictions' */ fielddataEvictions?: string
+ /** The used query cache. */ 'query_cache.memory_size'?: string
+ /** The used query cache.
+ * @alias 'query_cache.memory_size' */ qcm?: string
+ /** The used query cache.
+ * @alias 'query_cache.memory_size' */ queryCacheMemory?: string
+ /** The query cache evictions. */ 'query_cache.evictions'?: string
+ /** The query cache evictions.
+ * @alias 'query_cache.evictions' */ qce?: string
+ /** The query cache evictions.
+ * @alias 'query_cache.evictions' */ queryCacheEvictions?: string
+ /** The query cache hit counts. */ 'query_cache.hit_count'?: string
+ /** The query cache hit counts.
+ * @alias 'query_cache.hit_count' */ qchc?: string
+ /** The query cache hit counts.
+ * @alias 'query_cache.hit_count' */ queryCacheHitCount?: string
+ /** The query cache miss counts. */ 'query_cache.miss_count'?: string
+ /** The query cache miss counts.
+ * @alias 'query_cache.miss_count' */ qcmc?: string
+ /** The query cache miss counts.
+ * @alias 'query_cache.miss_count' */ queryCacheMissCount?: string
+ /** The used request cache. */ 'request_cache.memory_size'?: string
+ /** The used request cache.
+ * @alias 'request_cache.memory_size' */ rcm?: string
+ /** The used request cache.
+ * @alias 'request_cache.memory_size' */ requestCacheMemory?: string
+ /** The request cache evictions. */ 'request_cache.evictions'?: string
+ /** The request cache evictions.
+ * @alias 'request_cache.evictions' */ rce?: string
+ /** The request cache evictions.
+ * @alias 'request_cache.evictions' */ requestCacheEvictions?: string
+ /** The request cache hit counts. */ 'request_cache.hit_count'?: string
+ /** The request cache hit counts.
+ * @alias 'request_cache.hit_count' */ rchc?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** The request cache miss counts. */ 'request_cache.miss_count'?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ rcmc?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get ops. */ 'get.current'?: string + /** The number of current get ops. + * @alias 'get.current' */ gc?: string + /** The number of current get ops. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get. */ 'get.time'?: string + /** The time spent in get. + * @alias 'get.time' */ gti?: string + /** The time spent in get. + * @alias 'get.time' */ getTime?: string + /** The number of get ops. */ 'get.total'?: string + /** The number of get ops. + * @alias 'get.total' */ gto?: string + /** The number of get ops. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful gets. */ 'get.exists_time'?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed gets. */ 'get.missing_time'?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed gets. */ 'get.missing_total'?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ gmto?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletions. */ 'indexing.delete_current'?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletions. */ 'indexing.delete_time'?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. 
+ * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing. */ 'indexing.index_time'?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. */ 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** The number of current merges. */ 'merges.current'?: string + /** The number of current merges. + * @alias 'merges.current' */ mc?: string + /** The number of current merges. + * @alias 'merges.current' */ mergesCurrent?: string + /** The number of current merging docs. */ 'merges.current_docs'?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mcd?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** The size of current merges. */ 'merges.current_size'?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mcs?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** The number of completed merge operations. */ 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mergesTotal?: string + /** The docs merged. */ 'merges.total_docs'?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mtd?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** The size merged. */ 'merges.total_size'?: string + /** The size merged. + * @alias 'merges.total_size' */ mts?: string + /** The size merged. + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** The time spent in merges. */ 'merges.total_time'?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mtt?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** The total refreshes. */ 'refresh.total'?: string + /** The time spent in refreshes. */ 'refresh.time'?: string + /** The total external refreshes. */ 'refresh.external_total'?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ rto?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ refreshTotal?: string + /** The time spent in external refreshes. */ 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ refreshTime?: string + /** The number of pending refresh listeners. */ 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ refreshListeners?: string + /** The total script compilations. 
*/ 'script.compilations'?: string + /** The total script compilations. + * @alias 'script.compilations' */ scrcc?: string + /** The total script compilations. + * @alias 'script.compilations' */ scriptCompilations?: string + /** The total compiled scripts evicted from the cache. */ 'script.cache_evictions'?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scrce?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scriptCacheEvictions?: string + /** The script cache compilation limit triggered. */ 'script.compilation_limit_triggered'?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scrclt?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scriptCacheCompilationLimitTriggered?: string + /** The current fetch phase operations. */ 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** The time spent in fetch phase. */ 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total fetch operations. */ 'search.fetch_total'?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The open search contexts. */ 'search.open_contexts'?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total query phase operations. */ 'search.query_total'?: string + /** The total query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total query phase operations. + * @alias 'search.query_total' */ searchQueryTotal?: string + /** The open scroll contexts. */ 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** The time scroll contexts held open. */ 'search.scroll_time'?: string + /** The time scroll contexts held open. + * @alias 'search.scroll_time' */ scti?: string + /** The time scroll contexts held open. + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** The completed scroll contexts. */ 'search.scroll_total'?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ scto?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** The number of segments. 
*/ 'segments.count'?: string
+ /** The number of segments.
+ * @alias 'segments.count' */ sc?: string
+ /** The number of segments.
+ * @alias 'segments.count' */ segmentsCount?: string
+ /** The memory used by segments. */ 'segments.memory'?: string
+ /** The memory used by segments.
+ * @alias 'segments.memory' */ sm?: string
+ /** The memory used by segments.
+ * @alias 'segments.memory' */ segmentsMemory?: string
+ /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string
+ /** The memory used by the index writer.
+ * @alias 'segments.index_writer_memory' */ siwm?: string
+ /** The memory used by the index writer.
+ * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string
+ /** The memory used by the version map. */ 'segments.version_map_memory'?: string
+ /** The memory used by the version map.
+ * @alias 'segments.version_map_memory' */ svmm?: string
+ /** The memory used by the version map.
+ * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string
+ /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields. */ 'segments.fixed_bitset_memory'?: string
+ /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields.
+ * @alias 'segments.fixed_bitset_memory' */ sfbm?: string
+ /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields.
+ * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string
+ /** The number of current suggest operations. */ 'suggest.current'?: string
+ /** The number of current suggest operations.
+ * @alias 'suggest.current' */ suc?: string
+ /** The number of current suggest operations.
+ * @alias 'suggest.current' */ suggestCurrent?: string
+ /** The time spent in suggest. */ 'suggest.time'?: string
+ /** The time spent in suggest.
+ * @alias 'suggest.time' */ suti?: string
+ /** The time spent in suggest.
+ * @alias 'suggest.time' */ suggestTime?: string
+ /** The number of suggest operations. */ 'suggest.total'?: string
+ /** The number of suggest operations.
+ * @alias 'suggest.total' */ suto?: string
+ /** The number of suggest operations.
+ * @alias 'suggest.total' */ suggestTotal?: string
+ /** The number of bulk shard operations. */ 'bulk.total_operations'?: string
+ /** The number of bulk shard operations.
+ * @alias 'bulk.total_operations' */ bto?: string
+ /** The number of bulk shard operations.
+ * @alias 'bulk.total_operations' */ bulkTotalOperations?: string
+ /** The time spent in shard bulk. */ 'bulk.total_time'?: string
+ /** The time spent in shard bulk.
+ * @alias 'bulk.total_time' */ btti?: string
+ /** The time spent in shard bulk.
+ * @alias 'bulk.total_time' */ bulkTotalTime?: string
+ /** The total size in bytes of shard bulk. */ 'bulk.total_size_in_bytes'?: string
+ /** The total size in bytes of shard bulk.
+ * @alias 'bulk.total_size_in_bytes' */ btsi?: string
+ /** The total size in bytes of shard bulk.
+ * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string
+ /** The average time spent in shard bulk. */ 'bulk.avg_time'?: string
+ /** The average time spent in shard bulk.
+ * @alias 'bulk.avg_time' */ bati?: string
+ /** The average time spent in shard bulk.
+ * @alias 'bulk.avg_time' */ bulkAvgTime?: string
+ /** The average size in bytes of shard bulk. */ 'bulk.avg_size_in_bytes'?: string
+ /** The average size in bytes of shard bulk.
+ * @alias 'bulk.avg_size_in_bytes' */ basi?: string
+ /** The average size in bytes of shard bulk.
+ * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string
}
export interface CatNodesRequest extends CatCatRequestBase {
+ /** The unit used to display byte values. */ bytes?: Bytes
+ /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ full_id?: boolean | string
+ /** If `true`, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean
+ /** A comma-separated list of column names to display.
+ * It supports simple wildcards. */ h?: CatCatNodeColumns
+ /** A comma-separated list of column names or aliases that determines the sort order.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** The period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** The unit used to display time values. */ time?: TimeUnit
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { bytes?: never, full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never, time?: never }
}
export type CatNodesResponse = CatNodesNodesRecord[]
export interface CatPendingTasksPendingTasksRecord {
+ /** The task insertion order. */ insertOrder?: string
+ /** The task insertion order.
+ * @alias insertOrder */ o?: string
+ /** Indicates how long the task has been in queue. */ timeInQueue?: string
+ /** Indicates how long the task has been in queue.
+ * @alias timeInQueue */ t?: string
+ /** The task priority. */ priority?: string
+ /** The task priority.
+ * @alias priority */ p?: string
+ /** The task source. */ source?: string
+ /** The task source.
+ * @alias source */ s?: string
}
export interface CatPendingTasksRequest extends CatCatRequestBase {
+ /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false`, the list of selected nodes is computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */ local?: boolean
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** Unit used to display time values. */ time?: TimeUnit
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never, time?: never }
}
export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[]
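+ // The `body`/`querystring` catch-alls above are the escape hatch for parameters
+ // without a dedicated field. A sketch sorting by the `timeInQueue` alias and
+ // passing an extra query parameter through verbatim (both choices illustrative;
+ // assumes the `client` instance from the earlier sketch):
+ //
+ //   const pending: CatPendingTasksResponse = await client.cat.pendingTasks({
+ //     s: 'timeInQueue:desc',
+ //     querystring: { v: true }
+ //   })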
export interface CatPluginsPluginsRecord {
+ /** The unique node identifier. */ id?: NodeId
+ /** The node name. */ name?: Name
+ /** The node name.
+ * @alias name */ n?: Name
+ /** The component name. */ component?: string
+ /** The component name.
+ * @alias component */ c?: string
+ /** The component version. */ version?: VersionString
+ /** The component version.
+ * @alias version */ v?: VersionString
+ /** The plugin details. */ description?: string
+ /** The plugin details.
+ * @alias description */ d?: string
+ /** The plugin type. */ type?: string
+ /** The plugin type.
+ * @alias type */ t?: string
}
export interface CatPluginsRequest extends CatCatRequestBase {
+ /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** Include bootstrap plugins in the response. */ include_bootstrap?: boolean
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false`, the list of selected nodes is computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */ local?: boolean
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never }
}
export type CatPluginsResponse = CatPluginsPluginsRecord[]
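+ // A sketch pairing CatPluginsRequest with its record fields (column choices
+ // illustrative; assumes the `client` instance from the earlier sketch):
+ //
+ //   const plugins: CatPluginsResponse = await client.cat.plugins({
+ //     h: ['name', 'component', 'version'],
+ //     include_bootstrap: false
+ //   })
+ //   plugins.forEach(p => console.log(p.component, p.version))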
export interface CatRecoveryRecoveryRecord {
+ /** The index name. */ index?: IndexName
+ /** The index name.
+ * @alias index */ i?: IndexName
+ /** The index name.
+ * @alias index */ idx?: IndexName
+ /** The shard name. */ shard?: string
+ /** The shard name.
+ * @alias shard */ s?: string
+ /** The shard name.
+ * @alias shard */ sh?: string
+ /** The recovery start time. */ start_time?: DateTime
+ /** The recovery start time.
+ * @alias start_time */ start?: DateTime
+ /** The recovery start time in epoch milliseconds. */ start_time_millis?: EpochTime
+ /** The recovery start time in epoch milliseconds.
+ * @alias start_time_millis */ start_millis?: EpochTime
+ /** The recovery stop time. */ stop_time?: DateTime
+ /** The recovery stop time.
+ * @alias stop_time */ stop?: DateTime
+ /** The recovery stop time in epoch milliseconds. */ stop_time_millis?: EpochTime
+ /** The recovery stop time in epoch milliseconds.
+ * @alias stop_time_millis */ stop_millis?: EpochTime
+ /** The recovery time. */ time?: Duration
+ /** The recovery time.
+ * @alias time */ t?: Duration
+ /** The recovery time.
+ * @alias time */ ti?: Duration
+ /** The recovery type. */ type?: string
+ /** The recovery type.
+ * @alias type */ ty?: string
+ /** The recovery stage. */ stage?: string
+ /** The recovery stage.
+ * @alias stage */ st?: string
+ /** The source host. */ source_host?: string
+ /** The source host.
+ * @alias source_host */ shost?: string
+ /** The source node name. */ source_node?: string
+ /** The source node name.
+ * @alias source_node */ snode?: string
+ /** The target host. */ target_host?: string
+ /** The target host.
+ * @alias target_host */ thost?: string
+ /** The target node name. */ target_node?: string
+ /** The target node name.
+ * @alias target_node */ tnode?: string
+ /** The repository name. */ repository?: string
+ /** The repository name.
+ * @alias repository */ rep?: string
+ /** The snapshot name. */ snapshot?: string
+ /** The snapshot name.
+ * @alias snapshot */ snap?: string
+ /** The number of files to recover. */ files?: string
+ /** The number of files to recover.
+ * @alias files */ f?: string
+ /** The files recovered. */ files_recovered?: string
+ /** The files recovered.
+ * @alias files_recovered */ fr?: string
+ /** The ratio of files recovered. */ files_percent?: Percentage
+ /** The ratio of files recovered.
+ * @alias files_percent */ fp?: Percentage
+ /** The total number of files. */ files_total?: string
+ /** The total number of files.
+ * @alias files_total */ tf?: string
+ /** The number of bytes to recover. */ bytes?: string
+ /** The number of bytes to recover.
+ * @alias bytes */ b?: string
+ /** The bytes recovered. */ bytes_recovered?: string
+ /** The bytes recovered.
+ * @alias bytes_recovered */ br?: string
+ /** The ratio of bytes recovered. */ bytes_percent?: Percentage
+ /** The ratio of bytes recovered.
+ * @alias bytes_percent */ bp?: Percentage
+ /** The total number of bytes. */ bytes_total?: string
+ /** The total number of bytes.
+ * @alias bytes_total */ tb?: string
+ /** The number of translog operations to recover. */ translog_ops?: string
+ /** The number of translog operations to recover.
+ * @alias translog_ops */ to?: string
+ /** The translog operations recovered. */ translog_ops_recovered?: string
+ /** The translog operations recovered.
+ * @alias translog_ops_recovered */ tor?: string
+ /** The ratio of translog operations recovered. */ translog_ops_percent?: Percentage
+ /** The ratio of translog operations recovered.
+ * @alias translog_ops_percent */ top?: Percentage
}
export interface CatRecoveryRequest extends CatCatRequestBase {
+ /** A comma-separated list of data streams, indices, and aliases used to limit the request.
+ * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices
+ /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean
+ /** The unit used to display byte values. */ bytes?: Bytes
+ /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean
+ /** A comma-separated list of column names to display.
+ * It supports simple wildcards. */ h?: CatCatRecoveryColumns
+ /** A comma-separated list of column names or aliases that determines the sort order.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** The unit used to display time values. */ time?: TimeUnit
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never }
}
export type CatRecoveryResponse = CatRecoveryRecoveryRecord[]
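+ // A sketch of watching ongoing recoveries (the index pattern and columns are
+ // illustrative; assumes the `client` instance from the earlier sketch).
+ // `active_only` limits the output to in-flight shard recoveries:
+ //
+ //   const recovering: CatRecoveryResponse = await client.cat.recovery({
+ //     index: 'my-index-*',
+ //     active_only: true,
+ //     h: ['index', 'shard', 'stage', 'files_percent', 'bytes_percent']
+ //   })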
export interface CatRepositoriesRepositoriesRecord {
+ /** The unique repository identifier. */ id?: string
+ /** The unique repository identifier.
+ * @alias id */ repoId?: string
+ /** The repository type. */ type?: string
+ /** The repository type.
+ * @alias type */ t?: string
}
export interface CatRepositoriesRequest extends CatCatRequestBase {
+ /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names
+ /** List of columns that determine how the table should be sorted.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false`, the list of selected nodes is computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */ local?: boolean
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never }
}
export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[]
export interface CatSegmentsRequest extends CatCatRequestBase {
+ /** A comma-separated list of data streams, indices, and aliases used to limit the request.
+ * Supports wildcards (`*`).
+ * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices
+ /** The unit used to display byte values. */ bytes?: Bytes
+ /** A comma-separated list of column names to display.
+ * It supports simple wildcards. */ h?: CatCatSegmentsColumns
+ /** A comma-separated list of column names or aliases that determines the sort order.
+ * Sorting defaults to ascending and can be changed by setting `:asc`
+ * or `:desc` as a suffix to the column name. */ s?: Names
+ /** If `true`, the request computes the list of selected nodes from the
+ * local cluster state. If `false`, the list of selected nodes is computed
+ * from the cluster state of the master node. In both cases the coordinating
+ * node will send requests for further information to each selected node. */ local?: boolean
+ /** Period to wait for a connection to the master node. */ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, local?: never, master_timeout?: never }
}
export type CatSegmentsResponse = CatSegmentsSegmentsRecord[]
export interface CatSegmentsSegmentsRecord {
+ /** The index name. */ index?: IndexName
+ /** The index name.
+ * @alias index */ i?: IndexName
+ /** The index name.
+ * @alias index */ idx?: IndexName
+ /** The shard name. */ shard?: string
+ /** The shard name.
+ * @alias shard */ s?: string
+ /** The shard name.
+ * @alias shard */ sh?: string
+ /** The shard type: `primary` or `replica`. */ prirep?: string
+ /** The shard type: `primary` or `replica`.
+ * @alias prirep */ p?: string
+ /** The shard type: `primary` or `replica`.
+ * @alias prirep */ pr?: string
+ /** The shard type: `primary` or `replica`.
+ * @alias prirep */ primaryOrReplica?: string + /** The IP address of the node where it lives. */ ip?: string + /** The unique identifier of the node where it lives. */ id?: NodeId + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. */ segment?: string + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. + * @alias segment */ seg?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. */ generation?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ g?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ gen?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. */ 'docs.count'?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ dc?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ docsCount?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. */ 'docs.deleted'?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ dd?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ docsDeleted?: string + /** The segment size in bytes. */ size?: ByteSize + /** The segment size in bytes. 
+ * @alias size */ si?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. */ 'size.memory'?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sm?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sizeMemory?: ByteSize + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. */ committed?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ ic?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ isCommitted?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. */ searchable?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ is?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ isSearchable?: string + /** The version of Lucene used to write the segment. */ version?: VersionString + /** The version of Lucene used to write the segment. + * @alias version */ v?: VersionString + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. */ compound?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ ico?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ isCompound?: string } export interface CatShardsRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** The unit used to display byte values. */ bytes?: Bytes + /** List of columns to appear in the response. Supports simple wildcards. */ h?: CatCatShardColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** The period to wait for a connection to the master node. */ master_timeout?: Duration + /** The unit used to display time values. 
*/ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } } export type CatShardsResponse = CatShardsShardsRecord[] export interface CatShardsShardsRecord { + /** The index name. */ index?: string + /** The index name. + * @alias index */ i?: string + /** The index name. + * @alias index */ idx?: string + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The shard type: `primary` or `replica`. */ prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ primaryOrReplica?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. */ state?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. + * @alias state */ st?: string + /** The number of documents in the shard. */ docs?: string | null + /** The number of documents in the shard. + * @alias docs */ d?: string | null + /** The number of documents in the shard. + * @alias docs */ dc?: string | null + /** The disk space used by the shard. */ store?: string | null + /** The disk space used by the shard. + * @alias store */ sto?: string | null + /** The total size of the dataset (including the cache for partially mounted indices). */ dataset?: string | null + /** The IP address of the node. */ ip?: string | null + /** The unique identifier for the node. */ id?: string + /** The name of the node. */ node?: string | null + /** The name of the node. + * @alias node */ n?: string | null + /** The sync identifier. */ sync_id?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.
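Not part of the diff: a minimal companion sketch for the `CatShardsRequest` options and the `CatShardsShardsRecord` shape documented here, assuming the `client` instance from the previous sketch. Dotted column names come back as quoted keys on the typed record.

const shards = await client.cat.shards({
  h: ['index', 'shard', 'prirep', 'state', 'unassigned.reason'],
  s: 'state'
})
for (const shard of shards) {
  if (shard.state === 'UNASSIGNED') {
    // Quoted access mirrors the dotted field names in CatShardsShardsRecord.
    console.log(`${shard.index ?? '?'}[${shard.shard ?? '?'}]: ${shard['unassigned.reason'] ?? 'unknown'}`)
  }
}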
+ * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. */ 'unassigned.reason'?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. + * @alias 'unassigned.reason' */ ur?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). */ 'unassigned.at'?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.at' */ ua?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). */ 'unassigned.for'?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.for' */ uf?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. */ 'unassigned.details'?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. 
+ * @alias 'unassigned.details' */ ud?: string + /** The type of recovery source. */ 'recoverysource.type'?: string + /** The type of recovery source. + * @alias 'recoverysource.type' */ rs?: string + /** The size of completion. */ 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ cs?: string + /** The size of completion. + * @alias 'completion.size' */ completionSize?: string + /** The used fielddata cache memory. */ 'fielddata.memory_size'?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fm?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** The fielddata cache evictions. */ 'fielddata.evictions'?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fe?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** The used query cache memory. */ 'query_cache.memory_size'?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ qcm?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** The query cache evictions. */ 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get operations. */ 'get.current'?: string + /** The number of current get operations. + * @alias 'get.current' */ gc?: string + /** The number of current get operations. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get operations. */ 'get.time'?: string + /** The time spent in get operations. + * @alias 'get.time' */ gti?: string + /** The time spent in get operations. + * @alias 'get.time' */ getTime?: string + /** The number of get operations. */ 'get.total'?: string + /** The number of get operations. + * @alias 'get.total' */ gto?: string + /** The number of get operations. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful get operations. */ 'get.exists_time'?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed get operations. */ 'get.missing_time'?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed get operations. */ 'get.missing_total'?: string + /** The number of failed get operations. 
+ * @alias 'get.missing_total' */ gmto?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletion operations. */ 'indexing.delete_current'?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletion operations. */ 'indexing.delete_time'?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing operations. */ 'indexing.index_time'?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. */ 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** The number of current merge operations. */ 'merges.current'?: string + /** The number of current merge operations. + * @alias 'merges.current' */ mc?: string + /** The number of current merge operations. + * @alias 'merges.current' */ mergesCurrent?: string + /** The number of current merging documents. */ 'merges.current_docs'?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ mcd?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** The size of current merge operations. */ 'merges.current_size'?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ mcs?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** The number of completed merge operations. */ 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mergesTotal?: string + /** The number of merged documents. */ 'merges.total_docs'?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ mtd?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** The total size of merges.
*/ 'merges.total_size'?: string + /** The total size of merges. + * @alias 'merges.total_size' */ mts?: string + /** The total size of merges. + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** The time spent merging documents. */ 'merges.total_time'?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ mtt?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** The total number of refreshes. */ 'refresh.total'?: string + /** The time spent in refreshes. */ 'refresh.time'?: string + /** The total number of external refreshes. */ 'refresh.external_total'?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ rto?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ refreshTotal?: string + /** The time spent in external refreshes. */ 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ refreshTime?: string + /** The number of pending refresh listeners. */ 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ refreshListeners?: string + /** The current fetch phase operations. */ 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** The time spent in fetch phase. */ 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total number of fetch operations. */ 'search.fetch_total'?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The number of open search contexts. */ 'search.open_contexts'?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total number of query phase operations. */ 'search.query_total'?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ searchQueryTotal?: string + /** The open scroll contexts. */ 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ scc?: string + /** The open scroll contexts.
+ * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** The time scroll contexts were held open. */ 'search.scroll_time'?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ scti?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** The number of completed scroll contexts. */ 'search.scroll_total'?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ scto?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** The number of segments. */ 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ sc?: string + /** The number of segments. + * @alias 'segments.count' */ segmentsCount?: string + /** The memory used by segments. */ 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ segmentsMemory?: string + /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ 'segments.version_map_memory'?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields. */ 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** The maximum sequence number. */ 'seq_no.max'?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ sqm?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ maxSeqNo?: string + /** The local checkpoint. */ 'seq_no.local_checkpoint'?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ sql?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ localCheckpoint?: string + /** The global checkpoint. */ 'seq_no.global_checkpoint'?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ sqg?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ globalCheckpoint?: string + /** The number of current warmer operations. */ 'warmer.current'?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ wc?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ warmerCurrent?: string + /** The total number of warmer operations. */ 'warmer.total'?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ wto?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ warmerTotal?: string + /** The time spent in warmer operations.
*/ 'warmer.total_time'?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ wtt?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** The shard data path. */ 'path.data'?: string + /** The shard data path. + * @alias 'path.data' */ pd?: string + /** The shard data path. + * @alias 'path.data' */ dataPath?: string + /** The shard state path. */ 'path.state'?: string + /** The shard state path. + * @alias 'path.state' */ ps?: string + /** The shard state path. + * @alias 'path.state' */ statsPath?: string + /** The number of bulk shard operations. */ 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bulkTotalOperations?: string + /** The time spent in shard bulk operations. */ 'bulk.total_time'?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ btti?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** The total size in bytes of shard bulk operations. */ 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk operations. */ 'bulk.avg_time'?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bati?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** The average size in bytes of shard bulk operations. */ 'bulk.avg_size_in_bytes'?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string } export interface CatSnapshotsRequest extends CatCatRequestBase { + /** A comma-separated list of snapshot repositories used to limit the request. + * Accepts wildcard expressions. + * `_all` returns all repositories. + * If any repository fails during the request, Elasticsearch returns an error. */ repository?: Names + /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ h?: CatCatSnapshotsColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Unit used to display time values. */ time?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never, time?: never } } export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] export interface CatSnapshotsSnapshotsRecord { + /** The unique identifier for the snapshot. */ id?: string + /** The unique identifier for the snapshot. + * @alias id */ snapshot?: string + /** The repository name. */ repository?: string + /** The repository name. + * @alias repository */ re?: string + /** The repository name. + * @alias repository */ repo?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. */ status?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. + * @alias status */ s?: string + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. */ start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ ste?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process started. */ start_time?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ sti?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ startTime?: WatcherScheduleTimeOfDay + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. */ end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ ete?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process ended. */ end_time?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ eti?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ endTime?: TimeOfDay + /** The time it took the snapshot process to complete, in time units. */ duration?: Duration + /** The time it took the snapshot process to complete, in time units. + * @alias duration */ dur?: Duration + /** The number of indices in the snapshot. */ indices?: string + /** The number of indices in the snapshot. + * @alias indices */ i?: string + /** The number of successful shards in the snapshot. */ successful_shards?: string + /** The number of successful shards in the snapshot.
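Not part of the diff: a short sketch for `CatSnapshotsRequest`, reusing the assumed `client`; `my-repo` is an illustrative repository name.

const snapshots = await client.cat.snapshots({
  repository: 'my-repo',
  ignore_unavailable: true, // skip unavailable snapshots instead of failing
  h: ['id', 'status', 'start_time', 'duration', 'total_shards'],
  s: 'start_epoch:desc'     // newest first
})
for (const snap of snapshots) {
  if (snap.status !== 'SUCCESS') console.warn(snap.id, snap.status)
}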
+ * @alias successful_shards */ ss?: string + /** The number of failed shards in the snapshot. */ failed_shards?: string + /** The number of failed shards in the snapshot. + * @alias failed_shards */ fs?: string + /** The total number of shards in the snapshot. */ total_shards?: string + /** The total number of shards in the snapshot. + * @alias total_shards */ ts?: string + /** The reason for any snapshot failures. */ reason?: string + /** The reason for any snapshot failures. + * @alias reason */ r?: string } export interface CatTasksRequest extends CatCatRequestBase { + /** The task action names, which are used to limit the response. */ actions?: string[] + /** If `true`, the response includes detailed information about the running tasks. */ detailed?: boolean + /** Unique node identifiers, which are used to limit the response. */ nodes?: string[] + /** The parent task identifier, which is used to limit the response. */ parent_task_id?: string + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** Unit used to display time values. */ time?: TimeUnit + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, time?: never, timeout?: never, wait_for_completion?: never } } export type CatTasksResponse = CatTasksTasksRecord[] export interface CatTasksTasksRecord { + /** The identifier of the task with the node. */ id?: Id + /** The task action. */ action?: string + /** The task action. + * @alias action */ ac?: string + /** The unique task identifier. */ task_id?: Id + /** The unique task identifier. + * @alias task_id */ ti?: Id + /** The parent task identifier. */ parent_task_id?: string + /** The parent task identifier. + * @alias parent_task_id */ pti?: string + /** The task type. */ type?: string + /** The task type. + * @alias type */ ty?: string + /** The start time in milliseconds. */ start_time?: string + /** The start time in milliseconds. + * @alias start_time */ start?: string + /** The start time in `HH:MM:SS` format. */ timestamp?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ ts?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hms?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hhmmss?: string + /** The running time in nanoseconds. */ running_time_ns?: string + /** The running time. */ running_time?: string + /** The running time. + * @alias running_time */ time?: string + /** The unique node identifier. */ node_id?: NodeId + /** The unique node identifier. + * @alias node_id */ ni?: NodeId + /** The IP address for the node. */ ip?: string + /** The IP address for the node.
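Not part of the diff: a sketch for `CatTasksRequest` with the same assumed `client`, listing long-running search tasks; note that `detailed` adds the task description column.

const tasks = await client.cat.tasks({
  actions: ['*search*'], // limit by action name pattern
  detailed: true,
  h: ['action', 'task_id', 'running_time', 'node'],
  s: 'running_time:desc'
})
console.table(tasks)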
+ * @alias ip */ i?: string + /** The bound transport port for the node. */ port?: string + /** The bound transport port for the node. + * @alias port */ po?: string + /** The node name. */ node?: string + /** The node name. + * @alias node */ n?: string + /** The Elasticsearch version. */ version?: VersionString + /** The Elasticsearch version. + * @alias version */ v?: VersionString + /** The X-Opaque-ID header. */ x_opaque_id?: string + /** The X-Opaque-ID header. + * @alias x_opaque_id */ x?: string + /** The task action description. */ description?: string + /** The task action description. + * @alias description */ desc?: string } export interface CatTemplatesRequest extends CatCatRequestBase { + /** The name of the template to return. + * Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } } export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] export interface CatTemplatesTemplatesRecord { + /** The template name. */ name?: Name + /** The template name. + * @alias name */ n?: Name + /** The template index patterns. */ index_patterns?: string + /** The template index patterns. + * @alias index_patterns */ t?: string + /** The template application order or priority number. */ order?: string + /** The template application order or priority number. + * @alias order */ o?: string + /** The template application order or priority number. + * @alias order */ p?: string + /** The template version. */ version?: VersionString | null + /** The template version. + * @alias version */ v?: VersionString | null + /** The component templates that comprise the index template. */ composed_of?: string + /** The component templates that comprise the index template. + * @alias composed_of */ c?: string } export interface CatThreadPoolRequest extends CatCatRequestBase { + /** A comma-separated list of thread pool names used to limit the request. + * Accepts wildcard expressions. */ thread_pool_patterns?: Names + /** List of columns to appear in the response. Supports simple wildcards. */ h?: CatCatThreadPoolColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names + /** The unit used to display time values. */ time?: TimeUnit + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. 
If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean + /** The period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } } export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] export interface CatThreadPoolThreadPoolRecord { + /** The node name. */ node_name?: string + /** The node name. + * @alias node_name */ nn?: string + /** The persistent node identifier. */ node_id?: NodeId + /** The persistent node identifier. + * @alias node_id */ id?: NodeId + /** The ephemeral node identifier. */ ephemeral_node_id?: string + /** The ephemeral node identifier. + * @alias ephemeral_node_id */ eid?: string + /** The process identifier. */ pid?: string + /** The process identifier. + * @alias pid */ p?: string + /** The host name for the current node. */ host?: string + /** The host name for the current node. + * @alias host */ h?: string + /** The IP address for the current node. */ ip?: string + /** The IP address for the current node. + * @alias ip */ i?: string + /** The bound transport port for the current node. */ port?: string + /** The bound transport port for the current node. + * @alias port */ po?: string + /** The thread pool name. */ name?: string + /** The thread pool name. + * @alias name */ n?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. */ type?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. + * @alias type */ t?: string + /** The number of active threads in the current thread pool. */ active?: string + /** The number of active threads in the current thread pool. + * @alias active */ a?: string + /** The number of threads in the current thread pool. */ pool_size?: string + /** The number of threads in the current thread pool. + * @alias pool_size */ psz?: string + /** The number of tasks currently in queue. */ queue?: string + /** The number of tasks currently in queue. + * @alias queue */ q?: string + /** The maximum number of tasks permitted in the queue. */ queue_size?: string + /** The maximum number of tasks permitted in the queue. + * @alias queue_size */ qs?: string + /** The number of rejected tasks. */ rejected?: string + /** The number of rejected tasks. + * @alias rejected */ r?: string + /** The highest number of active threads in the current thread pool. */ largest?: string + /** The highest number of active threads in the current thread pool. + * @alias largest */ l?: string + /** The number of completed tasks. */ completed?: string + /** The number of completed tasks. + * @alias completed */ c?: string + /** The core number of active threads allowed in a scaling thread pool. */ core?: string | null + /** The core number of active threads allowed in a scaling thread pool. 
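Not part of the diff: a sketch for `CatThreadPoolRequest` with the same assumed `client`. Cat record values are strings, so the rejected-task count is compared as text.

const pools = await client.cat.threadPool({
  thread_pool_patterns: ['write', 'search'], // wildcard expressions are accepted
  h: ['node_name', 'name', 'active', 'queue', 'rejected'],
  s: 'rejected:desc'
})
const rejecting = pools.filter(p => p.rejected !== undefined && p.rejected !== '0')
console.log(rejecting)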
+ * @alias core */ cr?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. */ max?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. + * @alias max */ mx?: string | null + /** The number of active threads allowed in a fixed thread pool. */ size?: string | null + /** The number of active threads allowed in a fixed thread pool. + * @alias size */ sz?: string | null + /** The thread keep alive time. */ keep_alive?: string | null + /** The thread keep alive time. + * @alias keep_alive */ ka?: string | null } export interface CatTransformsRequest extends CatCatRequestBase { + /** A transform identifier or a wildcard expression. + * If you do not specify one of these options, the API returns information for all transforms. */ transform_id?: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of transforms. */ from?: integer + /** Comma-separated list of column names to display. */ h?: CatCatTransformColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatCatTransformColumns + /** The unit used to display time values. */ time?: TimeUnit + /** The maximum number of transforms to obtain. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, time?: never, size?: never } } export type CatTransformsResponse = CatTransformsTransformsRecord[] export interface CatTransformsTransformsRecord { + /** The transform identifier. */ id?: Id + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. */ state?: string + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. + * @alias state */ s?: string + /** The sequence number for the checkpoint. */ checkpoint?: string + /** The sequence number for the checkpoint.
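Not part of the diff: a sketch for `CatTransformsRequest` with the same assumed `client`; the transform ID pattern is illustrative. Per the `state` docs above, a `failed` transform carries its failure details in `reason`.

const transforms = await client.cat.transforms({
  transform_id: 'ecommerce-*', // wildcard expression (illustrative)
  allow_no_match: true,        // empty array instead of a 404 when nothing matches
  h: ['id', 'state', 'documents_processed', 'checkpoint_progress', 'reason'],
  s: 'id'
})
for (const t of transforms) {
  if (t.state === 'failed') console.warn(`${t.id ?? '?'} failed: ${t.reason ?? 'unknown'}`)
}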
+ * @alias checkpoint */ c?: string + /** The number of documents that have been processed from the source index of the transform. */ documents_processed?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ docp?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ documentsProcessed?: string + /** The progress of the next checkpoint that is currently in progress. */ checkpoint_progress?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ cp?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ checkpointProgress?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. */ last_search_time?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lst?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lastSearchTime?: string | null + /** The timestamp when changes were last detected in the source indices. */ changes_last_detection_time?: string | null + /** The timestamp when changes were last detected in the source indices. + * @alias changes_last_detection_time */ cldt?: string | null + /** The time the transform was created. */ create_time?: string + /** The time the transform was created. + * @alias create_time */ ct?: string + /** The time the transform was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString + /** The version of Elasticsearch that existed on the node when the transform was created. + * @alias version */ v?: VersionString + /** The source indices for the transform. */ source_index?: string + /** The source indices for the transform. + * @alias source_index */ si?: string + /** The source indices for the transform. + * @alias source_index */ sourceIndex?: string + /** The destination index for the transform. */ dest_index?: string + /** The destination index for the transform. + * @alias dest_index */ di?: string + /** The destination index for the transform. + * @alias dest_index */ destIndex?: string + /** The unique identifier for the ingest pipeline. */ pipeline?: string + /** The unique identifier for the ingest pipeline. + * @alias pipeline */ p?: string + /** The description of the transform. */ description?: string + /** The description of the transform. + * @alias description */ d?: string + /** The type of transform: `batch` or `continuous`. */ transform_type?: string + /** The type of transform: `batch` or `continuous`. + * @alias transform_type */ tt?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. */ frequency?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. + * @alias frequency */ f?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. 
*/ max_page_search_size?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. + * @alias max_page_search_size */ mpsz?: string + /** The number of input documents per second. */ docs_per_second?: string + /** The number of input documents per second. + * @alias docs_per_second */ dps?: string + /** If a transform has a `failed` state, these details describe the reason for failure. */ reason?: string + /** If a transform has a `failed` state, these details describe the reason for failure. + * @alias reason */ r?: string + /** The total number of search operations on the source index for the transform. */ search_total?: string + /** The total number of search operations on the source index for the transform. + * @alias search_total */ st?: string + /** The total number of search failures. */ search_failure?: string + /** The total number of search failures. + * @alias search_failure */ sf?: string + /** The total amount of search time, in milliseconds. */ search_time?: string + /** The total amount of search time, in milliseconds. + * @alias search_time */ stime?: string + /** The total number of index operations done by the transform. */ index_total?: string + /** The total number of index operations done by the transform. + * @alias index_total */ it?: string + /** The total number of indexing failures. */ index_failure?: string + /** The total number of indexing failures. + * @alias index_failure */ if?: string + /** The total time spent indexing documents, in milliseconds. */ index_time?: string + /** The total time spent indexing documents, in milliseconds. + * @alias index_time */ itime?: string + /** The number of documents that have been indexed into the destination index for the transform. */ documents_indexed?: string + /** The number of documents that have been indexed into the destination index for the transform. + * @alias documents_indexed */ doci?: string + /** The total time spent deleting documents, in milliseconds. */ delete_time?: string + /** The total time spent deleting documents, in milliseconds. + * @alias delete_time */ dtime?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. */ documents_deleted?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. + * @alias documents_deleted */ docd?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. */ trigger_count?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. + * @alias trigger_count */ tc?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. */ pages_processed?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. + * @alias pages_processed */ pp?: string + /** The total time spent processing results, in milliseconds. */ processing_time?: string + /** The total time spent processing results, in milliseconds. 
+ * @alias processing_time */ pt?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. */ checkpoint_duration_time_exp_avg?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ cdtea?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ checkpointTimeExpAvg?: string + /** The exponential moving average of the number of new documents that have been indexed. */ indexed_documents_exp_avg?: string + /** The exponential moving average of the number of new documents that have been indexed. + * @alias indexed_documents_exp_avg */ idea?: string + /** The exponential moving average of the number of documents that have been processed. */ processed_documents_exp_avg?: string + /** The exponential moving average of the number of documents that have been processed. + * @alias processed_documents_exp_avg */ pdea?: string } export interface CcrFollowIndexStats { + /** The name of the follower index. */ index: IndexName + /** An array of shard-level following task statistics. */ shards: CcrShardStats[] } export interface CcrReadException { + /** The exception that caused the read to fail. */ exception: ErrorCause + /** The starting sequence number of the batch requested from the leader. */ from_seq_no: SequenceNumber + /** The number of times the batch has been retried. */ retries: integer } export interface CcrShardStats { + /** The total of transferred bytes read from the leader. + * This is only an estimate and does not account for compression if enabled. */ bytes_read: long + /** The number of failed reads. */ failed_read_requests: long + /** The number of failed bulk write requests on the follower. */ failed_write_requests: long fatal_exception?: ErrorCause + /** The index aliases version the follower is synced up to. */ follower_aliases_version: VersionNumber + /** The current global checkpoint on the follower. + * The difference between the `leader_global_checkpoint` and the `follower_global_checkpoint` is an indication of how much the follower is lagging the leader. */ follower_global_checkpoint: long + /** The name of the follower index. */ follower_index: string + /** The mapping version the follower is synced up to. */ follower_mapping_version: VersionNumber + /** The current maximum sequence number on the follower. */ follower_max_seq_no: SequenceNumber + /** The index settings version the follower is synced up to. */ follower_settings_version: VersionNumber + /** The starting sequence number of the last batch of operations requested from the leader. */ last_requested_seq_no: SequenceNumber + /** The current global checkpoint on the leader known to the follower task. */ leader_global_checkpoint: long + /** The name of the index in the leader cluster being followed. */ leader_index: string + /** The current maximum sequence number on the leader known to the follower task. */ leader_max_seq_no: SequenceNumber + /** The total number of operations read from the leader. */ operations_read: long + /** The number of operations written on the follower. */ operations_written: long + /** The number of active read requests from the follower. */ outstanding_read_requests: integer + /** The number of active bulk write requests on the follower. */ outstanding_write_requests: integer + /** An array of objects representing failed reads. 
*/ read_exceptions: CcrReadException[] + /** The remote cluster containing the leader index. */ remote_cluster: string + /** The numerical shard ID, with values from 0 to one less than the number of replicas. */ shard_id: integer + /** The number of successful fetches. */ successful_read_requests: long + /** The number of bulk write requests run on the follower. */ successful_write_requests: long time_since_last_read?: Duration + /** The number of milliseconds since a read request was sent to the leader. + * When the follower is caught up to the leader, this number will increase up to the configured `read_poll_timeout` at which point another read request will be sent to the leader. */ time_since_last_read_millis: DurationValue<UnitMillis> total_read_remote_exec_time?: Duration + /** The total time reads spent running on the remote cluster. */ total_read_remote_exec_time_millis: DurationValue<UnitMillis> total_read_time?: Duration + /** The total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower. */ total_read_time_millis: DurationValue<UnitMillis> total_write_time?: Duration + /** The total time spent writing on the follower. */ total_write_time_millis: DurationValue<UnitMillis> + /** The number of write operations queued on the follower. */ write_buffer_operation_count: long + /** The total number of bytes of operations currently queued for writing. */ write_buffer_size_in_bytes: ByteSize } export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { + /** The auto-follow pattern collection to delete. */ name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrFollowRequest extends RequestBase { + /** The name of the follower index. */ index: IndexName + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be + * active. + * A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the + * remote Lucene segment files to the follower index. */ wait_for_active_shards?: WaitForActiveShards + /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */ data_stream_name?: string + /** The name of the index in the leader cluster to follow. */ leader_index: IndexName + /** The maximum number of outstanding read requests from the remote cluster. */ max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer + /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
*/ max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration + /** The remote cluster containing the leader index. */ remote_cluster: string + /** Settings to override from the leader index. */ settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } } export interface CcrFollowResponse { @@ -9060,31 +15321,59 @@ export interface CcrFollowResponse { } export interface CcrFollowInfoFollowerIndex { + /** The name of the follower index. */ follower_index: IndexName + /** The name of the index in the leader cluster that is followed. */ leader_index: IndexName + /** An object that encapsulates cross-cluster replication parameters. If the follower index's status is paused, this object is omitted. */ parameters?: CcrFollowInfoFollowerIndexParameters + /** The remote cluster that contains the leader index. */ remote_cluster: Name + /** The status of the index following: `active` or `paused`. 
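/*
 * Editor's illustrative sketch (not generated code): how CcrFollowRequest maps onto
 * `client.ccr.follow()`. The node URL and all index/cluster names are hypothetical
 * placeholders; later sketches in this file reuse this `client` instance.
 */
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function startFollowing (): Promise<void> {
  const response = await client.ccr.follow({
    index: 'follower-index',       // the new follower index on the local cluster
    remote_cluster: 'leader',      // remote cluster alias that holds the leader index
    leader_index: 'leader-index',  // the index to replicate
    max_outstanding_read_requests: 12,
    wait_for_active_shards: 1
  })
  // CcrFollowResponse reports whether the follower index was created and following started
  console.log(response.follow_index_created, response.index_following_started)
}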
+   */
   status: CcrFollowInfoFollowerIndexStatus
 }

 export interface CcrFollowInfoFollowerIndexParameters {
+  /** The maximum number of outstanding read requests from the remote cluster. */
   max_outstanding_read_requests?: long
+  /** The maximum number of outstanding write requests on the follower. */
   max_outstanding_write_requests?: integer
+  /** The maximum number of operations to pull per read from the remote cluster. */
   max_read_request_operation_count?: integer
+  /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster. */
   max_read_request_size?: ByteSize
+  /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
+   * retrying. */
   max_retry_delay?: Duration
+  /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
+   * deferred until the number of queued operations goes below the limit. */
   max_write_buffer_count?: integer
+  /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
+   * be deferred until the total bytes of queued operations goes below the limit. */
   max_write_buffer_size?: ByteSize
+  /** The maximum number of operations per bulk write request executed on the follower. */
   max_write_request_operation_count?: integer
+  /** The maximum total bytes of operations per bulk write request executed on the follower. */
   max_write_request_size?: ByteSize
+  /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
+   * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
+   * Then the follower will immediately attempt to read from the leader again. */
   read_poll_timeout?: Duration
 }

 export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused'

 export interface CcrFollowInfoRequest extends RequestBase {
+  /** A comma-delimited list of follower index patterns. */
   index: Indices
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, master_timeout?: never }
 }

 export interface CcrFollowInfoResponse {
@@ -9092,21 +15381,35 @@ export interface CcrFollowInfoResponse {
 }

 export interface CcrFollowStatsRequest extends RequestBase {
+  /** A comma-delimited list of index patterns. */
   index: Indices
+  /** The period to wait for a response.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, timeout?: never }
 }
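/*
 * Editor's sketch: reading follower state back with the CcrFollowInfoRequest and
 * CcrFollowStatsRequest shapes above (reuses the `client` from the earlier sketch;
 * the index pattern is hypothetical).
 */
async function inspectFollowers (): Promise<void> {
  // Per-index replication configuration; `status` is 'active' or 'paused'
  const info = await client.ccr.followInfo({ index: 'follower-*' })
  for (const follower of info.follower_indices) {
    console.log(follower.follower_index, follower.status)
  }

  // Shard-level statistics (CcrFollowIndexStats -> CcrShardStats)
  const stats = await client.ccr.followStats({ index: 'follower-*' })
  console.log(stats.indices[0]?.shards[0]?.follower_global_checkpoint)
}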
 export interface CcrFollowStatsResponse {
+  /** An array of follower index statistics. */
   indices: CcrFollowIndexStats[]
 }

 export interface CcrForgetFollowerRequest extends RequestBase {
+  /** The name of the leader index for which specified follower retention leases should be removed. */
   index: IndexName
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
   follower_cluster?: string
   follower_index?: IndexName
   follower_index_uuid?: Uuid
   leader_remote_cluster?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never }
 }

 export interface CcrForgetFollowerResponse {
@@ -9120,16 +15423,30 @@ export interface CcrGetAutoFollowPatternAutoFollowPattern {
 }

 export interface CcrGetAutoFollowPatternAutoFollowPatternSummary {
   active: boolean
+  /** The remote cluster containing the leader indices to match against. */
   remote_cluster: string
+  /** The name of the follower index. */
   follow_index_pattern?: IndexPattern
+  /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */
   leader_index_patterns: IndexPatterns
+  /** An array of simple index patterns that can be used to exclude indices from being auto-followed. */
   leader_index_exclusion_patterns: IndexPatterns
+  /** The maximum number of outstanding read requests from the remote cluster. */
   max_outstanding_read_requests: integer
 }

 export interface CcrGetAutoFollowPatternRequest extends RequestBase {
+  /** The auto-follow pattern collection that you want to retrieve.
+   * If you do not specify a name, the API returns information for all collections. */
   name?: Name
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
 }

 export interface CcrGetAutoFollowPatternResponse {
@@ -9137,50 +15454,97 @@ export interface CcrGetAutoFollowPatternResponse {
 }
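/*
 * Editor's sketch: listing auto-follow patterns. Omitting `name` returns every
 * collection, per CcrGetAutoFollowPatternRequest above (reuses the shared `client`).
 */
async function listAutoFollowPatterns (): Promise<void> {
  const response = await client.ccr.getAutoFollowPattern()
  for (const { name, pattern } of response.patterns) {
    console.log(name, pattern.remote_cluster, pattern.leader_index_patterns)
  }
}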
 export interface CcrPauseAutoFollowPatternRequest extends RequestBase {
+  /** The name of the auto-follow pattern to pause. */
   name: Name
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
 }

 export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase

 export interface CcrPauseFollowRequest extends RequestBase {
+  /** The name of the follower index. */
   index: IndexName
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, master_timeout?: never }
 }

 export type CcrPauseFollowResponse = AcknowledgedResponseBase

 export interface CcrPutAutoFollowPatternRequest extends RequestBase {
+  /** The name of the collection of auto-follow patterns. */
   name: Name
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** The remote cluster containing the leader indices to match against. */
   remote_cluster: string
+  /** The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices. */
   follow_index_pattern?: IndexPattern
+  /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */
   leader_index_patterns?: IndexPatterns
+  /** An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. */
   leader_index_exclusion_patterns?: IndexPatterns
+  /** The maximum number of outstanding read requests from the remote cluster. */
   max_outstanding_read_requests?: integer
+  /** Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards). */
   settings?: Record
+  /** The maximum number of outstanding write requests on the follower. */
   max_outstanding_write_requests?: integer
+  /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */
   read_poll_timeout?: Duration
+  /** The maximum number of operations to pull per read from the remote cluster. */
   max_read_request_operation_count?: integer
+  /** The maximum size in bytes per read of a batch of operations pulled from the remote cluster. */
   max_read_request_size?: ByteSize
+  /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */
   max_retry_delay?: Duration
+  /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
*/ max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } } export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { + /** The name of the auto-follow pattern to resume. */ name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { + /** The name of the follow index to resume following. */ index: IndexName + /** Period to wait for a connection to the master node. */ master_timeout?: Duration max_outstanding_read_requests?: long max_outstanding_write_requests?: long @@ -9192,15 +15556,24 @@ export interface CcrResumeFollowRequest extends RequestBase { max_write_request_operation_count?: long max_write_request_size?: string read_poll_timeout?: Duration + /** All values in `body` will be added to the request body. 
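/*
 * Editor's sketch: creating an auto-follow pattern from the
 * CcrPutAutoFollowPatternRequest fields documented above. All names are
 * hypothetical placeholders (reuses the shared `client`).
 */
async function createAutoFollowPattern (): Promise<void> {
  const response = await client.ccr.putAutoFollowPattern({
    name: 'logs-pattern',
    remote_cluster: 'leader',
    leader_index_patterns: ['logs-*'],
    leader_index_exclusion_patterns: ['logs-tmp-*'],
    follow_index_pattern: '{{leader_index}}-copy'
  })
  console.log(response.acknowledged)
}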
*/ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } } export type CcrResumeFollowResponse = AcknowledgedResponseBase export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[] + /** The number of indices that the auto-follow coordinator failed to automatically follow. + * The causes of recent failures are captured in the logs of the elected master node and in the `auto_follow_stats.recent_auto_follow_errors` field. */ number_of_failed_follow_indices: long + /** The number of times that the auto-follow coordinator failed to retrieve the cluster state from a remote cluster registered in a collection of auto-follow patterns. */ number_of_failed_remote_cluster_state_requests: long + /** The number of indices that the auto-follow coordinator successfully followed. */ number_of_successful_follow_indices: long + /** An array of objects representing failures by the auto-follow coordinator. */ recent_auto_follow_errors: ErrorCause[] } @@ -9215,18 +15588,36 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface CcrStatsResponse { + /** Statistics for the auto-follow coordinator. */ auto_follow_stats: CcrStatsAutoFollowStats + /** Shard-level statistics for follower indices. */ follow_stats: CcrStatsFollowStats } export interface CcrUnfollowRequest extends RequestBase { + /** The name of the follower index. */ index: IndexName + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } } export type CcrUnfollowResponse = AcknowledgedResponseBase @@ -9316,13 +15707,24 @@ export interface ClusterAllocationExplainNodeDiskUsage { } export interface ClusterAllocationExplainRequest extends RequestBase { + /** If true, returns information about disk usage and shard sizes. */ include_disk_info?: boolean + /** If true, returns YES decisions in explanation. */ include_yes_decisions?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. */ current_node?: string + /** Specifies the name of the index that you would like an explanation for. */ index?: IndexName + /** If true, returns explanation for the primary shard for the given shard ID. */ primary?: boolean + /** Specifies the ID of the shard that you would like an explanation for. */ shard?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, current_node?: never, index?: never, primary?: never, shard?: never } } export interface ClusterAllocationExplainReservedSize { @@ -9373,35 +15775,79 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface ClusterDeleteComponentTemplateRequest extends RequestBase { + /** Comma-separated list or wildcard expression of component template names used to limit the request. */ name: Names + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Specifies whether to wait for all excluded nodes to be removed from the + * cluster before clearing the voting configuration exclusions list. 
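/*
 * Editor's sketch: asking why a shard is unassigned or slow to allocate, using
 * ClusterAllocationExplainRequest above. The index name and shard number are
 * hypothetical (reuses the shared `client`).
 */
async function explainAllocation (): Promise<void> {
  const response = await client.cluster.allocationExplain({
    index: 'my-index',
    shard: 0,
    primary: true,
    include_disk_info: true
  })
  console.log(response.allocate_explanation)
}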
+ * Defaults to true, meaning that all excluded nodes must be removed from + * the cluster before this API takes any action. If set to false then the + * voting configuration exclusions list is cleared even if some excluded + * nodes are still in the cluster. */ wait_for_removal?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } } export type ClusterDeleteVotingConfigExclusionsResponse = boolean export interface ClusterExistsComponentTemplateRequest extends RequestBase { + /** Comma-separated list of component template names used to limit the request. + * Wildcard (*) expressions are supported. */ name: Names + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration + /** If true, the request retrieves information from the local node only. + * Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } } export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { + /** Comma-separated list of component template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name?: Name + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys */ settings_filter?: string | string[] + /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } } export interface ClusterGetComponentTemplateResponse { @@ -9409,37 +15855,72 @@ export interface ClusterGetComponentTemplateResponse { } export interface ClusterGetSettingsRequest extends RequestBase { + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `true`, also returns default values for all other cluster settings, reflecting the values + * in the `elasticsearch.yml` file of one of the nodes in the cluster. 
+   * If the nodes in your cluster do not all have the same values in their `elasticsearch.yml` config files, then the
+   * values returned by this API may vary from invocation to invocation and may not reflect the
+   * values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to
+   * fetch the settings for each individual node in your cluster. */
   include_defaults?: boolean
+  /** Period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never }
 }

 export interface ClusterGetSettingsResponse {
+  /** The settings that persist after the cluster restarts. */
   persistent: Record
+  /** The settings that do not persist after the cluster restarts. */
   transient: Record
+  /** The default setting values. */
   defaults?: Record
 }

 export interface ClusterHealthHealthResponseBody {
+  /** The number of active primary shards. */
   active_primary_shards: integer
+  /** The total number of active primary and replica shards. */
   active_shards: integer
+  /** The ratio of active shards in the cluster expressed as a string formatted percentage. */
   active_shards_percent?: string
+  /** The ratio of active shards in the cluster expressed as a percentage. */
   active_shards_percent_as_number: double
+  /** The name of the cluster. */
   cluster_name: Name
+  /** The number of shards whose allocation has been delayed by the timeout settings. */
   delayed_unassigned_shards: integer
   indices?: Record
+  /** The number of shards that are under initialization. */
   initializing_shards: integer
+  /** The number of nodes that are dedicated data nodes. */
   number_of_data_nodes: integer
+  /** The number of unfinished fetches. */
   number_of_in_flight_fetch: integer
+  /** The number of nodes within the cluster. */
   number_of_nodes: integer
+  /** The number of cluster-level changes that have not yet been executed. */
   number_of_pending_tasks: integer
+  /** The number of shards that are under relocation. */
   relocating_shards: integer
   status: HealthStatus
+  /** The time that the earliest initiated task has been waiting to be performed. */
   task_max_waiting_in_queue?: Duration
+  /** The time, expressed in milliseconds, that the earliest initiated task has been waiting to be performed. */
   task_max_waiting_in_queue_millis: DurationValue
+  /** If false, the response was returned within the period of time that is specified by the timeout parameter (30s by default). */
   timed_out: boolean
+  /** The number of primary shards that are not allocated. */
   unassigned_primary_shards: integer
+  /** The number of shards that are not allocated. */
   unassigned_shards: integer
 }

@@ -9457,18 +15938,34 @@ export interface ClusterHealthIndexHealthStats {
 }
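/*
 * Editor's sketch: reading cluster settings with ClusterGetSettingsRequest above.
 * With `include_defaults: true` the `defaults` section of the response is
 * populated as well (reuses the shared `client`).
 */
async function readClusterSettings (): Promise<void> {
  const response = await client.cluster.getSettings({
    flat_settings: true,
    include_defaults: true
  })
  console.log(response.persistent, response.transient, response.defaults)
}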
 export interface ClusterHealthRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported.
+   * To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */
   index?: Indices
+  /** Whether to expand wildcard expressions to concrete indices that are open, closed or both. */
   expand_wildcards?: ExpandWildcards
+  /** Can be one of cluster, indices or shards. Controls the details level of the health information returned. */
   level?: Level
+  /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */
   local?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** A number controlling how many active shards to wait for, `all` to wait for all shards in the cluster to be active, or `0` to not wait. */
   wait_for_active_shards?: WaitForActiveShards
+  /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */
   wait_for_events?: WaitForEvents
+  /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */
   wait_for_nodes?: ClusterHealthWaitForNodes
+  /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */
   wait_for_no_initializing_shards?: boolean
+  /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */
   wait_for_no_relocating_shards?: boolean
+  /** One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. */
   wait_for_status?: HealthStatus
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never }
 }

 export type ClusterHealthResponse = ClusterHealthHealthResponseBody

@@ -9486,7 +15983,12 @@ export interface ClusterHealthShardHealthStats {
 }

 export type ClusterHealthWaitForNodes = string | integer

 export interface ClusterInfoRequest extends RequestBase {
+  /** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */
   target: ClusterInfoTargets
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { target?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { target?: never }
 }

 export interface ClusterInfoResponse {
@@ -9498,17 +16000,32 @@ export interface ClusterInfoResponse {
 }
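/*
 * Editor's sketch: blocking until the cluster reaches at least yellow health,
 * using ClusterHealthRequest above. The timeout value is illustrative (reuses
 * the shared `client`).
 */
async function waitForYellow (): Promise<void> {
  const health = await client.cluster.health({
    wait_for_status: 'yellow',
    timeout: '30s'
  })
  console.log(health.status, health.number_of_nodes, health.timed_out)
}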
 export interface ClusterPendingTasksPendingTask {
+  /** Indicates whether the pending tasks are currently executing or not. */
   executing: boolean
+  /** The number that represents when the task has been inserted into the task queue. */
   insert_order: integer
+  /** The priority of the pending task.
+   * The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. */
   priority: string
+  /** A general description of the cluster task that may include a reason and origin. */
   source: string
+  /** The time that the task has been waiting to be performed. */
   time_in_queue?: Duration
+  /** The time, expressed in milliseconds, that the task has been waiting to be performed. */
   time_in_queue_millis: DurationValue
 }

 export interface ClusterPendingTasksRequest extends RequestBase {
+  /** If `true`, the request retrieves information from the local node only.
+   * If `false`, information is retrieved from the master node. */
   local?: boolean
+  /** Period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { local?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { local?: never, master_timeout?: never }
 }

 export interface ClusterPendingTasksResponse {
@@ -9516,33 +16033,78 @@ export interface ClusterPendingTasksResponse {
 }

 export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase {
+  /** A comma-separated list of the names of the nodes to exclude from the
+   * voting configuration. If specified, you may not also specify node_ids. */
   node_names?: Names
+  /** A comma-separated list of the persistent ids of the nodes to exclude
+   * from the voting configuration. If specified, you may not also specify node_names. */
   node_ids?: Ids
+  /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
+  /** When adding a voting configuration exclusion, the API waits for the
+   * specified nodes to be excluded from the voting configuration before
+   * returning. If the timeout expires before the appropriate condition
+   * is satisfied, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never }
 }

 export type ClusterPostVotingConfigExclusionsResponse = boolean

 export interface ClusterPutComponentTemplateRequest extends RequestBase {
+  /** Name of the component template to create.
+   * Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`.
+   * Elastic Agent uses these templates to configure backing indices for its data streams.
+   * If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
+   * If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */
   name: Name
+  /** If `true`, this request cannot replace or update existing component templates. */
   create?: boolean
+  /** User-defined reason for creating the component template. */
   cause?: string
+  /** Period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error.
*/ master_timeout?: Duration + /** The template to be applied which includes mappings, settings, or aliases configuration. */ template: IndicesIndexState + /** Version number used to manage component templates externally. + * This number isn't automatically generated or incremented by Elasticsearch. + * To unset a version, replace the template without specifying a version. */ version?: VersionNumber + /** Optional user metadata about the component template. + * It may have any contents. This map is not automatically generated by Elasticsearch. + * This information is stored in the cluster state, so keeping it short is preferable. + * To unset `_meta`, replace the template without specifying this information. */ _meta?: Metadata + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } } export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { + /** Return settings in flat format (default: false) */ flat_settings?: boolean + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** The settings that persist after the cluster restarts. */ persistent?: Record + /** The settings that do not persist after the cluster restarts. */ transient?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never } } export interface ClusterPutSettingsResponse { @@ -9554,37 +16116,65 @@ export interface ClusterPutSettingsResponse { export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo export interface ClusterRemoteInfoClusterRemoteProxyInfo { + /** The connection mode for the remote cluster. */ mode: 'proxy' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean + /** The address for remote connections when proxy mode is configured. 
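/*
 * Editor's sketch: creating a component template via
 * ClusterPutComponentTemplateRequest above. The template name, settings block,
 * and metadata are hypothetical (reuses the shared `client`).
 */
async function createComponentTemplate (): Promise<void> {
  const response = await client.cluster.putComponentTemplate({
    name: 'my-settings-template',
    template: {
      settings: { number_of_shards: 1 }
    },
    version: 2,
    _meta: { owner: 'platform-team' }
  })
  console.log(response.acknowledged)
}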
*/ proxy_address: string server_name: string + /** The number of open socket connections to the remote cluster when proxy mode is configured. */ num_proxy_sockets_connected: integer + /** The maximum number of socket connections to the remote cluster when proxy mode is configured. */ max_proxy_socket_connections: integer + /** This field is present and has a value of `::es_redacted::` only when the remote cluster is configured with the API key based model. Otherwise, the field is not present. */ cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { + /** The connection mode for the remote cluster. */ mode: 'sniff' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The maximum number of connections maintained for the remote cluster when sniff mode is configured. */ max_connections_per_cluster: integer + /** The number of connected nodes in the remote cluster when sniff mode is configured. */ num_nodes_connected: long + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean + /** The initial seed transport addresses of the remote cluster when sniff mode is configured. */ seeds: string[] } export interface ClusterRemoteInfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export type ClusterRemoteInfoResponse = Record export interface ClusterRerouteCommand { + /** Cancel allocation of a shard (or recovery). Accepts index and shard for index name and shard number, and node for the node to cancel the shard allocation on. This can be used to force resynchronization of existing replicas from the primary shard by cancelling them and allowing them to be reinitialized through the standard recovery process. By default only replica shard allocations can be cancelled. If it is necessary to cancel the allocation of a primary shard then the allow_primary flag must also be included in the request. */ cancel?: ClusterRerouteCommandCancelAction + /** Move a started shard from one node to another node. Accepts index and shard for index name and shard number, from_node for the node to move the shard from, and to_node for the node to move the shard to. */ move?: ClusterRerouteCommandMoveAction + /** Allocate an unassigned replica shard to a node. Accepts index and shard for index name and shard number, and node to allocate the shard to. Takes allocation deciders into account. */ allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + /** Allocate a primary shard to a node that holds a stale copy. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command may lead to data loss for the provided shard id. 
If a node which has the good copy of the data rejoins the cluster later on, that data will be deleted or overwritten with the data of the stale copy that was forcefully allocated with this command. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + /** Allocate an empty primary shard to a node. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command leads to a complete loss of all data that was indexed into this shard, if it was previously started. If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction } @@ -9592,6 +16182,7 @@ export interface ClusterRerouteCommandAllocatePrimaryAction { index: IndexName shard: integer node: string + /** If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true */ accept_data_loss: boolean } @@ -9611,18 +16202,32 @@ export interface ClusterRerouteCommandCancelAction { export interface ClusterRerouteCommandMoveAction { index: IndexName shard: integer + /** The node to move the shard from */ from_node: string + /** The node to move the shard to */ to_node: string } export interface ClusterRerouteRequest extends RequestBase { + /** If true, then the request simulates the operation. + * It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ dry_run?: boolean + /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean + /** Limits the information returned to the specified metrics. */ metric?: Metrics + /** If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. */ retry_failed?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Defines the commands to perform. */ commands?: ClusterRerouteCommand[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never } + /** All values in `querystring` will be added to the request querystring. 
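/*
 * Editor's sketch: a dry-run reroute that moves one shard, using the command
 * shapes above. Node names are placeholders, and `dry_run: true` keeps the call
 * from mutating the cluster (reuses the shared `client`).
 */
async function simulateShardMove (): Promise<void> {
  const response = await client.cluster.reroute({
    dry_run: true,
    explain: true,
    commands: [
      { move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }
    ]
  })
  console.log(response.explanations?.[0]?.decisions)
}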
*/ + querystring?: { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never } } export interface ClusterRerouteRerouteDecision { @@ -9649,66 +16254,115 @@ export interface ClusterRerouteRerouteParameters { export interface ClusterRerouteResponse { acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] + /** There aren't any guarantees on the output/structure of the raw cluster state. + * Here you will find the internal representation of the cluster, which can + * differ from the external representation. */ state?: any } export interface ClusterStateRequest extends RequestBase { + /** Limit the information returned to the specified metrics */ metric?: Metrics + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Return settings in flat format (default: false) */ flat_settings?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Return local information, do not retrieve the state from master node (default: false) */ local?: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Wait for the metadata version to be equal or greater than the specified metadata version */ wait_for_metadata_version?: VersionNumber + /** The maximum time to wait for wait_for_metadata_version before timing out */ wait_for_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } } export type ClusterStateResponse = any export interface ClusterStatsCCSStats { + /** Contains remote cluster settings and metrics collected from them. + * The keys are cluster names, and the values are per-cluster data. + * Only present if `include_remotes` option is set to true. */ clusters?: Record + /** Information about cross-cluster search usage. */ _search: ClusterStatsCCSUsageStats + /** Information about ES|QL cross-cluster query usage. */ _esql?: ClusterStatsCCSUsageStats } export interface ClusterStatsCCSUsageClusterStats { + /** The total number of successful (not skipped) cross-cluster search requests that were executed against this cluster. This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. */ total: integer + /** The total number of cross-cluster search requests for which this cluster was skipped. 
*/ skipped: integer + /** Statistics about the time taken to execute requests against this cluster. */ took: ClusterStatsCCSUsageTimeValue } export interface ClusterStatsCCSUsageStats { + /** The total number of cross-cluster search requests that have been executed by the cluster. */ total: integer + /** The total number of cross-cluster search requests that have been successfully executed by the cluster. */ success: integer + /** The total number of cross-cluster search requests (successful or failed) that had at least one remote cluster skipped. */ skipped: integer + /** Statistics about the time taken to execute cross-cluster search requests. */ took: ClusterStatsCCSUsageTimeValue + /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `true`. */ took_mrt_true?: ClusterStatsCCSUsageTimeValue + /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `false`. */ took_mrt_false?: ClusterStatsCCSUsageTimeValue + /** The maximum number of remote clusters that were queried in a single cross-cluster search request. */ remotes_per_search_max: integer + /** The average number of remote clusters that were queried in a single cross-cluster search request. */ remotes_per_search_avg: double + /** Statistics about the reasons for cross-cluster search request failures. The keys are the failure reason names and the values are the number of requests that failed for that reason. */ failure_reasons: Record + /** The keys are the names of the search feature, and the values are the number of requests that used that feature. Single request can use more than one feature (e.g. both `async` and `wildcard`). */ features: Record + /** Statistics about the clients that executed cross-cluster search requests. The keys are the names of the clients, and the values are the number of requests that were executed by that client. Only known clients (such as `kibana` or `elasticsearch`) are counted. */ clients: Record + /** Statistics about the clusters that were queried in cross-cluster search requests. The keys are cluster names, and the values are per-cluster telemetry data. This also includes the local cluster itself, which uses the name `(local)`. */ clusters: Record } export interface ClusterStatsCCSUsageTimeValue { + /** The maximum time taken to execute a request, in milliseconds. */ max: DurationValue + /** The average time taken to execute a request, in milliseconds. */ avg: DurationValue + /** The 90th percentile of the time taken to execute requests, in milliseconds. */ p90: DurationValue } export interface ClusterStatsCharFilterTypes { + /** Contains statistics about analyzer types used in selected nodes. */ analyzer_types: ClusterStatsFieldTypes[] + /** Contains statistics about built-in analyzers used in selected nodes. */ built_in_analyzers: ClusterStatsFieldTypes[] + /** Contains statistics about built-in character filters used in selected nodes. */ built_in_char_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in token filters used in selected nodes. */ built_in_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in tokenizers used in selected nodes. */ built_in_tokenizers: ClusterStatsFieldTypes[] + /** Contains statistics about character filter types used in selected nodes. */ char_filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about token filter types used in selected nodes. 
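/*
 * Editor's sketch: pulling cluster stats, including the cross-cluster search
 * telemetry modelled by ClusterStatsCCSStats above. Whether `include_remotes`
 * and the `ccs` response section are available depends on the Elasticsearch
 * version, so treat both as assumptions (reuses the shared `client`).
 */
async function printClusterStats (): Promise<void> {
  const stats = await client.cluster.stats({ include_remotes: true })
  console.log(stats.indices.count, stats.nodes.count.total)
  // `ccs._search` carries the cross-cluster search usage statistics shown above
  console.log(stats.ccs?._search.total)
}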
+   */
   filter_types: ClusterStatsFieldTypes[]
+  /** Contains statistics about tokenizer types used in selected nodes. */
   tokenizer_types: ClusterStatsFieldTypes[]
+  /** Contains statistics about synonym types used in selected nodes. */
   synonyms: Record
 }

@@ -9716,11 +16370,21 @@ export interface ClusterStatsClusterFileSystem {
   path?: string
   mount?: string
   type?: string
+  /** Total number of bytes available to JVM in file stores across all selected nodes.
+   * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`.
+   * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */
   available_in_bytes?: long
+  /** Total number of bytes available to JVM in file stores across all selected nodes.
+   * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`.
+   * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */
   available?: ByteSize
+  /** Total number of unallocated bytes in file stores across all selected nodes. */
   free_in_bytes?: long
+  /** Total number of unallocated bytes in file stores across all selected nodes. */
   free?: ByteSize
+  /** Total size, in bytes, of all file stores across all selected nodes. */
   total_in_bytes?: long
+  /** Total size of all file stores across all selected nodes. */
   total?: ByteSize
   low_watermark_free_space?: ByteSize
   low_watermark_free_space_in_bytes?: long
@@ -9733,32 +16397,55 @@ export interface ClusterStatsClusterIndices {
+  /** Contains statistics about analyzers and analyzer components used in selected nodes. */
   analysis?: ClusterStatsCharFilterTypes
+  /** Contains statistics about memory used for completion in selected nodes. */
   completion: CompletionStats
+  /** Total number of indices with shards assigned to selected nodes. */
   count: long
+  /** Contains counts for documents in selected nodes. */
   docs: DocStats
+  /** Contains statistics about the field data cache of selected nodes. */
   fielddata: FielddataStats
+  /** Contains statistics about the query cache of selected nodes. */
   query_cache: QueryCacheStats
+  /** Holds a snapshot of the search usage statistics.
+   * Used to hold the stats for a single node that's part of a ClusterStatsNodeResponse, as well as to
+   * accumulate stats for the entire cluster and return them as part of the ClusterStatsResponse. */
   search: ClusterStatsSearchUsageStats
+  /** Contains statistics about segments in selected nodes. */
   segments: SegmentsStats
+  /** Contains statistics about indices with shards assigned to selected nodes. */
   shards: ClusterStatsClusterIndicesShards
+  /** Contains statistics about the size of shards assigned to selected nodes. */
   store: StoreStats
+  /** Contains statistics about field mappings in selected nodes. */
   mappings?: ClusterStatsFieldTypesMappings
+  /** Contains statistics about the index versions used in selected nodes. */
   versions?: ClusterStatsIndicesVersions[]
+  /** Contains statistics about indexed dense vectors. */
   dense_vector: ClusterStatsDenseVectorStats
+  /** Contains statistics about indexed sparse vectors. */
   sparse_vector: ClusterStatsSparseVectorStats
 }

 export interface ClusterStatsClusterIndicesShards {
+  /** Contains statistics about shards assigned to selected nodes. */
   index?: ClusterStatsClusterIndicesShardsIndex
+  /** Number of primary shards assigned to selected nodes.
*/ primaries?: double + /** Ratio of replica shards to primary shards across all selected nodes. */ replication?: double + /** Total number of shards assigned to selected nodes. */ total?: double } export interface ClusterStatsClusterIndicesShardsIndex { + /** Contains statistics about the number of primary shards assigned to selected nodes. */ primaries: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of replication shards assigned to selected nodes. */ replication: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of shards assigned to selected nodes. */ shards: ClusterStatsClusterShardMetrics } @@ -9768,32 +16455,51 @@ export interface ClusterStatsClusterIngest { } export interface ClusterStatsClusterJvm { + /** Uptime duration, in milliseconds, since JVM last started. */ max_uptime_in_millis: DurationValue + /** Uptime duration since JVM last started. */ max_uptime?: Duration + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory + /** Number of active threads in use by JVM across all selected nodes. */ threads: long + /** Contains statistics about the JVM versions used by selected nodes. */ versions: ClusterStatsClusterJvmVersion[] } export interface ClusterStatsClusterJvmMemory { + /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long + /** Maximum amount of memory available for use by the heap across all selected nodes. */ heap_max?: ByteSize + /** Memory, in bytes, currently in use by the heap across all selected nodes. */ heap_used_in_bytes: long + /** Memory currently in use by the heap across all selected nodes. */ heap_used?: ByteSize } export interface ClusterStatsClusterJvmVersion { + /** Always `true`. All distributions come with a bundled Java Development Kit (JDK). */ bundled_jdk: boolean + /** Total number of selected nodes using JVM. */ count: integer + /** If `true`, a bundled JDK is in use by JVM. */ using_bundled_jdk: boolean + /** Version of JVM used by one or more selected nodes. */ version: VersionString + /** Name of the JVM. */ vm_name: string + /** Vendor of the JVM. */ vm_vendor: string + /** Full version number of JVM. + * The full version number includes a plus sign (+) followed by the build number. */ vm_version: VersionString } export interface ClusterStatsClusterNetworkTypes { + /** Contains statistics about the HTTP network types used by selected nodes. */ http_types: Record + /** Contains statistics about the transport network types used by selected nodes. */ transport_types: Record } @@ -9817,56 +16523,91 @@ export interface ClusterStatsClusterNodeCount { } export interface ClusterStatsClusterNodes { + /** Contains counts for nodes selected by the request’s node filters. */ count: ClusterStatsClusterNodeCount + /** Contains statistics about the discovery types used by selected nodes. */ discovery_types: Record + /** Contains statistics about file stores by selected nodes. */ fs: ClusterStatsClusterFileSystem indexing_pressure: ClusterStatsIndexingPressure ingest: ClusterStatsClusterIngest + /** Contains statistics about the Java Virtual Machines (JVMs) used by selected nodes. */ jvm: ClusterStatsClusterJvm + /** Contains statistics about the transport and HTTP networks used by selected nodes. */ network_types: ClusterStatsClusterNetworkTypes + /** Contains statistics about the operating systems used by selected nodes. 
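// ---------------------------------------------------------------------------
// Editor's sketch: ClusterStatsClusterJvmMemory above reports heap_used_in_bytes
// and heap_max_in_bytes aggregated across the selected nodes, so cluster-wide
// heap pressure is a one-line derivation on the consumer side (hypothetical
// helper, not part of the client):

function heapUtilizationPercent (mem: { heap_used_in_bytes: number, heap_max_in_bytes: number }): number {
  // Guard against a zero or unreported max, which would otherwise divide by zero.
  if (mem.heap_max_in_bytes <= 0) return 0
  return (mem.heap_used_in_bytes / mem.heap_max_in_bytes) * 100
}
// ---------------------------------------------------------------------------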
*/ os: ClusterStatsClusterOperatingSystem + /** Contains statistics about Elasticsearch distributions installed on selected nodes. */ packaging_types: ClusterStatsNodePackagingType[] + /** Contains statistics about installed plugins and modules by selected nodes. + * If no plugins or modules are installed, this array is empty. */ plugins: PluginStats[] + /** Contains statistics about processes used by selected nodes. */ process: ClusterStatsClusterProcess + /** Array of Elasticsearch versions used on selected nodes. */ versions: VersionString[] } export interface ClusterStatsClusterOperatingSystem { + /** Number of processors used to calculate thread pool size across all selected nodes. + * This number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system. + * In both cases, this number will never be larger than 32. */ allocated_processors: integer + /** Contains statistics about processor architectures (for example, x86_64 or aarch64) used by selected nodes. */ architectures?: ClusterStatsClusterOperatingSystemArchitecture[] + /** Number of processors available to JVM across all selected nodes. */ available_processors: integer + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsOperatingSystemMemoryInfo + /** Contains statistics about operating systems used by selected nodes. */ names: ClusterStatsClusterOperatingSystemName[] + /** Contains statistics about operating systems used by selected nodes. */ pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] } export interface ClusterStatsClusterOperatingSystemArchitecture { + /** Name of an architecture used by one or more selected nodes. */ arch: string + /** Number of selected nodes using the architecture. */ count: integer } export interface ClusterStatsClusterOperatingSystemName { + /** Number of selected nodes using the operating system. */ count: integer + /** Name of an operating system used by one or more selected nodes. */ name: Name } export interface ClusterStatsClusterOperatingSystemPrettyName { + /** Number of selected nodes using the operating system. */ count: integer + /** Human-readable name of an operating system used by one or more selected nodes. */ pretty_name: Name } export interface ClusterStatsClusterProcess { + /** Contains statistics about CPU used by selected nodes. */ cpu: ClusterStatsClusterProcessCpu + /** Contains statistics about open file descriptors in selected nodes. */ open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors } export interface ClusterStatsClusterProcessCpu { + /** Percentage of CPU used across all selected nodes. + * Returns `-1` if not supported. */ percent: integer } export interface ClusterStatsClusterProcessOpenFileDescriptors { + /** Average number of concurrently open file descriptors. + * Returns `-1` if not supported. */ avg: long + /** Maximum number of concurrently open file descriptors allowed across all selected nodes. + * Returns `-1` if not supported. */ max: long + /** Minimum number of concurrently open file descriptors across all selected nodes. + * Returns -1 if not supported. */ min: long } @@ -9879,8 +16620,11 @@ export interface ClusterStatsClusterProcessor { } export interface ClusterStatsClusterShardMetrics { + /** Mean number of shards in an index, counting only shards assigned to selected nodes. */ avg: double + /** Maximum number of shards in an index, counting only shards assigned to selected nodes. 
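// ---------------------------------------------------------------------------
// Editor's sketch: several process metrics above (cpu.percent and the open
// file descriptor avg/max/min) use -1 as a "not supported" sentinel instead of
// omitting the field, so a consumer probably wants to normalize that before
// doing arithmetic (hypothetical helper):

function statOrUndefined (value: number): number | undefined {
  return value === -1 ? undefined : value
}
// ---------------------------------------------------------------------------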
*/ max: double + /** Minimum number of shards in an index, counting only shards assigned to selected nodes. */ min: double } @@ -9909,25 +16653,42 @@ export interface ClusterStatsDenseVectorStats { } export interface ClusterStatsFieldTypes { + /** The name for the field type in selected nodes. */ name: Name + /** The number of occurrences of the field type in selected nodes. */ count: integer + /** The number of indices containing the field type in selected nodes. */ index_count: integer + /** For dense_vector field types, number of indexed vector types in selected nodes. */ indexed_vector_count?: integer + /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_max?: integer + /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_min?: integer + /** The number of fields that declare a script. */ script_count?: integer + /** For dense_vector field types, count of mappings by index type */ vector_index_type_count?: Record + /** For dense_vector field types, count of mappings by similarity */ vector_similarity_type_count?: Record + /** For dense_vector field types, count of mappings by element type */ vector_element_type_count?: Record } export interface ClusterStatsFieldTypesMappings { + /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[] + /** Contains statistics about runtime field data types used in selected nodes. */ runtime_field_types: ClusterStatsRuntimeFieldTypes[] + /** Total number of fields in all non-system indices. */ total_field_count?: long + /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ total_deduplicated_field_count?: long + /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize + /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long + /** Source mode usage count. */ source_modes: Record } @@ -9944,21 +16705,34 @@ export interface ClusterStatsIndicesVersions { } export interface ClusterStatsNodePackagingType { + /** Number of selected nodes using the distribution flavor and file type. */ count: integer + /** Type of Elasticsearch distribution. This is always `default`. */ flavor: string + /** File type (such as `tar` or `zip`) used for the distribution package. */ type: string } export interface ClusterStatsOperatingSystemMemoryInfo { + /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total_in_bytes?: long + /** Total amount of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total?: ByteSize + /** Amount, in bytes, of free physical memory across all selected nodes. */ free_in_bytes: long + /** Amount of free physical memory across all selected nodes. */ free?: ByteSize + /** Percentage of free physical memory across all selected nodes. */ free_percent: integer + /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long + /** Total amount of physical memory across all selected nodes. 
*/ total?: ByteSize + /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long + /** Amount of physical memory in use across all selected nodes. */ used?: ByteSize + /** Percentage of physical memory in use across all selected nodes. */ used_percent: integer } @@ -9970,20 +16744,35 @@ export interface ClusterStatsPerRepositoryStats { } export interface ClusterStatsRemoteClusterInfo { + /** The UUID of the remote cluster. */ cluster_uuid: string + /** The connection mode used to communicate with the remote cluster. */ mode: string + /** The `skip_unavailable` setting used for this remote cluster. */ skip_unavailable: boolean + /** Transport compression setting used for this remote cluster. */ 'transport.compress': string + /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus + /** The list of Elasticsearch versions used by the nodes on the remote cluster. */ version: VersionString[] + /** The total count of nodes in the remote cluster. */ nodes_count: integer + /** The total number of shards in the remote cluster. */ shards_count: integer + /** The total number of indices in the remote cluster. */ indices_count: integer + /** Total data set size, in bytes, of all shards assigned to selected nodes. */ indices_total_size_in_bytes: long + /** Total data set size of all shards assigned to selected nodes, as a human-readable string. */ indices_total_size?: string + /** Maximum amount of memory, in bytes, available for use by the heap across the nodes of the remote cluster. */ max_heap_in_bytes: long + /** Maximum amount of memory available for use by the heap across the nodes of the remote cluster, as a human-readable string. */ max_heap?: string + /** Total amount, in bytes, of physical memory across the nodes of the remote cluster. */ mem_total_in_bytes: long + /** Total amount of physical memory across the nodes of the remote cluster, as a human-readable string. */ mem_total?: string } @@ -10005,27 +16794,50 @@ export interface ClusterStatsRepositoryStatsShards { } export interface ClusterStatsRequest extends RequestBase { + /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds + /** Include remote cluster data into the response */ include_remotes?: boolean + /** Period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its stats. + * However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } } export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { + /** Maximum number of characters for a single runtime field script. */ chars_max: integer + /** Total number of characters for the scripts that define the current runtime field data type. */ chars_total: integer + /** Number of runtime fields mapped to the field data type in selected nodes. 
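// ---------------------------------------------------------------------------
// Editor's sketch: a minimal call against the ClusterStatsRequest shape above,
// assuming a standard @elastic/elasticsearch client. The node URL and the
// '_local' node filter are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function printClusterStats (): Promise<void> {
  const stats = await client.cluster.stats({
    node_id: '_local', // node filter, as documented above; defaults to all nodes
    include_remotes: true, // also collect remote cluster data
    timeout: '30s' // per-node collection timeout; slow nodes land in _nodes.failed
  })
  console.log(stats.cluster_name, stats.status, stats.indices.count)
}
// ---------------------------------------------------------------------------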
*/ count: integer + /** Maximum number of accesses to doc_values for a single runtime field script. */ doc_max: integer + /** Total number of accesses to doc_values for the scripts that define the current runtime field data type. */ doc_total: integer + /** Number of indices containing a mapping of the runtime field data type in selected nodes. */ index_count: integer + /** Script languages used for the runtime fields scripts. */ lang: string[] + /** Maximum number of lines for a single runtime field script. */ lines_max: integer + /** Total number of lines for the scripts that define the current runtime field data type. */ lines_total: integer + /** Field data type used in selected nodes. */ name: Name + /** Number of runtime fields that don’t declare a script. */ scriptless_count: integer + /** Number of runtime fields that shadow an indexed field. */ shadowed_count: integer + /** Maximum number of accesses to _source for a single runtime field script. */ source_max: integer + /** Total number of accesses to _source for the scripts that define the current runtime field data type. */ source_total: integer } @@ -10040,10 +16852,15 @@ export interface ClusterStatsSearchUsageStats { } export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL' export interface ClusterStatsSnapshotCurrentCounts { + /** Snapshots currently in progress. */ snapshots: integer + /** Incomplete shard snapshots. */ shard_snapshots: integer + /** Snapshot deletions in progress. */ snapshot_deletions: integer + /** Sum of snapshots and snapshot_deletions. */ concurrent_operations: integer + /** Cleanups in progress, not counted in concurrent_operations as they are not concurrent. */ cleanups: integer } @@ -10052,14 +16869,23 @@ export interface ClusterStatsSparseVectorStats { } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + /** Name of the cluster, based on the cluster name setting. */ cluster_name: Name + /** Unique identifier for the cluster. */ cluster_uuid: Uuid + /** Contains statistics about indices with shards assigned to selected nodes. */ indices: ClusterStatsClusterIndices + /** Contains statistics about nodes selected by the request’s node filters. */ nodes: ClusterStatsClusterNodes + /** Contains stats on repository feature usage exposed in cluster stats for telemetry. */ repositories: Record> + /** Contains stats on cluster snapshots. */ snapshots: ClusterStatsClusterSnapshotStats + /** Health status of the cluster, based on the state of its primary and replica shards. */ status?: HealthStatus + /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */ timestamp: long + /** Cross-cluster stats. */ ccs: ClusterStatsCCSStats } @@ -10124,8 +16950,11 @@ export type ConnectorConnectorConfiguration = Record export interface ConnectorConnectorFeatures { + /** Indicates whether document-level security is enabled. */ document_level_security?: ConnectorFeatureEnabled + /** Indicates whether incremental syncs are enabled. */ incremental_sync?: ConnectorFeatureEnabled + /** Indicates whether managed connector API keys are enabled.
*/ native_connector_api_keys?: ConnectorFeatureEnabled sync_rules?: ConnectorSyncRulesFeature } @@ -10134,6 +16963,7 @@ export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' export interface ConnectorConnectorScheduling { enabled: boolean + /** The interval is expressed using the crontab syntax */ interval: string } @@ -10291,7 +17121,9 @@ export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' export interface ConnectorSyncRulesFeature { + /** Indicates whether advanced sync rules are enabled. */ advanced?: ConnectorFeatureEnabled + /** Indicates whether basic sync rules are enabled. */ basic?: ConnectorFeatureEnabled } @@ -10300,7 +17132,12 @@ export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'erro export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation export interface ConnectorCheckInRequest extends RequestBase { + /** The unique identifier of the connector to be checked in */ connector_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never } } export interface ConnectorCheckInResponse { @@ -10308,19 +17145,31 @@ export interface ConnectorCheckInResponse { } export interface ConnectorDeleteRequest extends RequestBase { + /** The unique identifier of the connector to be deleted */ connector_id: Id + /** A flag indicating if associated sync jobs should be also removed. Defaults to false. */ delete_sync_jobs?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never } } export type ConnectorDeleteResponse = AcknowledgedResponseBase export interface ConnectorGetRequest extends RequestBase { + /** The unique identifier of the connector */ connector_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never } } export type ConnectorGetResponse = ConnectorConnector export interface ConnectorLastSyncRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id last_access_control_sync_error?: string last_access_control_sync_scheduled_at?: DateTime @@ -10334,6 +17183,10 @@ export interface ConnectorLastSyncRequest extends RequestBase { last_sync_status?: ConnectorSyncStatus last_synced?: DateTime sync_cursor?: any + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never } } export interface ConnectorLastSyncResponse { @@ -10341,12 +17194,22 @@ export interface ConnectorLastSyncResponse { } export interface ConnectorListRequest extends RequestBase { + /** Starting offset (default: 0) */ from?: integer + /** Specifies a max number of results to get */ size?: integer + /** A comma-separated list of connector index names to fetch connector documents for */ index_name?: Indices + /** A comma-separated list of connector names to fetch connector documents for */ connector_name?: Names + /** A comma-separated list of connector service types to fetch connector documents for */ service_type?: Names + /** A wildcard query string that filters connectors with matching name, description or index name */ query?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, query?: never } } export interface ConnectorListResponse { @@ -10361,6 +17224,10 @@ export interface ConnectorPostRequest extends RequestBase { language?: string name?: string service_type?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } } export interface ConnectorPostResponse { @@ -10369,6 +17236,7 @@ export interface ConnectorPostResponse { } export interface ConnectorPutRequest extends RequestBase { + /** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ connector_id?: Id description?: string index_name?: IndexName @@ -10376,6 +17244,10 @@ export interface ConnectorPutRequest extends RequestBase { language?: string name?: string service_type?: string + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } } export interface ConnectorPutResponse { @@ -10384,7 +17256,12 @@ export interface ConnectorPutResponse { } export interface ConnectorSyncJobCancelRequest extends RequestBase { + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } } export interface ConnectorSyncJobCancelResponse { @@ -10392,47 +17269,85 @@ export interface ConnectorSyncJobCancelResponse { } export interface ConnectorSyncJobCheckInRequest extends RequestBase { + /** The unique identifier of the connector sync job to be checked in. */ connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } } export interface ConnectorSyncJobCheckInResponse { } export interface ConnectorSyncJobClaimRequest extends RequestBase { + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id + /** The cursor object from the last incremental sync job. + * This should reference the `sync_cursor` field in the connector state for which the job runs. */ sync_cursor?: any + /** The host name of the current system that will run the job. */ worker_hostname: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never } } export interface ConnectorSyncJobClaimResponse { } export interface ConnectorSyncJobDeleteRequest extends RequestBase { + /** The unique identifier of the connector sync job to be deleted */ connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } } export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase export interface ConnectorSyncJobErrorRequest extends RequestBase { + /** The unique identifier for the connector sync job. */ connector_sync_job_id: Id + /** The error for the connector sync job error field. */ error: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, error?: never } + /** All values in `querystring` will be added to the request querystring. 
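// ---------------------------------------------------------------------------
// Editor's sketch: creating (or updating) a connector through the
// ConnectorPutRequest shape above. Assumes a client version that ships the
// connector namespace; all identifiers are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function createConnector (): Promise<void> {
  const resp = await client.connector.put({
    connector_id: 'my-connector', // optional: auto-generated when omitted
    index_name: 'search-my-data',
    name: 'My connector',
    service_type: 'google_drive'
  })
  console.log(resp.id, resp.result)
}
// ---------------------------------------------------------------------------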
*/ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, error?: never } } export interface ConnectorSyncJobErrorResponse { } export interface ConnectorSyncJobGetRequest extends RequestBase { + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } } export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob export interface ConnectorSyncJobListRequest extends RequestBase { + /** Starting offset (default: 0) */ from?: integer + /** Specifies a max number of results to get */ size?: integer + /** A sync job status to fetch connector sync jobs for */ status?: ConnectorSyncStatus + /** A connector id to fetch connector sync jobs for */ connector_id?: Id + /** A comma-separated list of job types to fetch the sync jobs for */ job_type?: ConnectorSyncJobType | ConnectorSyncJobType[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never } } export interface ConnectorSyncJobListResponse { @@ -10441,9 +17356,14 @@ export interface ConnectorSyncJobListResponse { } export interface ConnectorSyncJobPostRequest extends RequestBase { + /** The id of the associated connector */ id: Id job_type?: ConnectorSyncJobType trigger_method?: ConnectorSyncJobTriggerMethod + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never } } export interface ConnectorSyncJobPostResponse { @@ -10451,20 +17371,36 @@ export interface ConnectorSyncJobPostResponse { } export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id + /** The number of documents the sync job deleted. */ deleted_document_count: long + /** The number of documents the sync job indexed. */ indexed_document_count: long + /** The total size of the data (in MiB) the sync job indexed. */ indexed_document_volume: long + /** The timestamp to use in the `last_seen` property for the connector sync job. */ last_seen?: Duration + /** The connector-specific metadata. */ metadata?: Metadata + /** The total number of documents in the target index after the sync job finished. */ total_document_count?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never } } export interface ConnectorSyncJobUpdateStatsResponse { } export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never } } export interface ConnectorUpdateActiveFilteringResponse { @@ -10472,9 +17408,14 @@ export interface ConnectorUpdateActiveFilteringResponse { } export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id api_key_id?: string api_key_secret_id?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never } } export interface ConnectorUpdateApiKeyIdResponse { @@ -10482,9 +17423,14 @@ export interface ConnectorUpdateApiKeyIdResponse { } export interface ConnectorUpdateConfigurationRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id configuration?: ConnectorConnectorConfiguration values?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never } } export interface ConnectorUpdateConfigurationResponse { @@ -10492,8 +17438,13 @@ export interface ConnectorUpdateConfigurationResponse { } export interface ConnectorUpdateErrorRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id error: SpecUtilsWithNullValue + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, error?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, error?: never } } export interface ConnectorUpdateErrorResponse { @@ -10501,8 +17452,13 @@ export interface ConnectorUpdateErrorResponse { } export interface ConnectorUpdateFeaturesRequest extends RequestBase { + /** The unique identifier of the connector to be updated. */ connector_id: Id features: ConnectorConnectorFeatures + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, features?: never } + /** All values in `querystring` will be added to the request querystring. 
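// ---------------------------------------------------------------------------
// Editor's sketch: reporting progress for a running sync job through the
// ConnectorSyncJobUpdateStatsRequest shape above. Assumes the client exposes
// this endpoint as connector.syncJobUpdateStats (the usual camelCase mapping);
// the job id and counts are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function reportSyncProgress (): Promise<void> {
  await client.connector.syncJobUpdateStats({
    connector_sync_job_id: 'my-sync-job-id',
    deleted_document_count: 0,
    indexed_document_count: 42,
    indexed_document_volume: 1 // MiB, per the field documentation above
  })
}
// ---------------------------------------------------------------------------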
*/ + querystring?: { [key: string]: any } & { connector_id?: never, features?: never } } export interface ConnectorUpdateFeaturesResponse { @@ -10510,10 +17466,15 @@ export interface ConnectorUpdateFeaturesResponse { } export interface ConnectorUpdateFilteringRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id filtering?: ConnectorFilteringConfig[] rules?: ConnectorFilteringRule[] advanced_snippet?: ConnectorFilteringAdvancedSnippet + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never } } export interface ConnectorUpdateFilteringResponse { @@ -10521,8 +17482,13 @@ export interface ConnectorUpdateFilteringResponse { } export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id validation: ConnectorFilteringRulesValidation + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, validation?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, validation?: never } } export interface ConnectorUpdateFilteringValidationResponse { @@ -10530,8 +17496,13 @@ export interface ConnectorUpdateFilteringValidationResponse { } export interface ConnectorUpdateIndexNameRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id index_name: SpecUtilsWithNullValue + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, index_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, index_name?: never } } export interface ConnectorUpdateIndexNameResponse { @@ -10539,9 +17510,14 @@ export interface ConnectorUpdateIndexNameResponse { } export interface ConnectorUpdateNameRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id name?: string description?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, name?: never, description?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, name?: never, description?: never } } export interface ConnectorUpdateNameResponse { @@ -10549,8 +17525,13 @@ export interface ConnectorUpdateNameResponse { } export interface ConnectorUpdateNativeRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id is_native: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, is_native?: never } + /** All values in `querystring` will be added to the request querystring. 
+ querystring?: { [key: string]: any } & { connector_id?: never, is_native?: never } } export interface ConnectorUpdateNativeResponse { @@ -10558,8 +17539,13 @@ export interface ConnectorUpdatePipelineRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id pipeline: ConnectorIngestPipelineParams + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, pipeline?: never } } export interface ConnectorUpdatePipelineResponse { @@ -10567,8 +17553,13 @@ export interface ConnectorUpdateSchedulingRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id scheduling: ConnectorSchedulingConfiguration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, scheduling?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, scheduling?: never } } export interface ConnectorUpdateSchedulingResponse { @@ -10576,8 +17567,13 @@ export interface ConnectorUpdateServiceTypeRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id service_type: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, service_type?: never } } export interface ConnectorUpdateServiceTypeResponse { @@ -10585,8 +17581,13 @@ export interface ConnectorUpdateStatusRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ connector_id: Id status: ConnectorConnectorStatus + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, status?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, status?: never } } export interface ConnectorUpdateStatusResponse { @@ -10594,19 +17595,36 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { + /** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ index_uuid: Uuid + /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */ accept_data_loss: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } } export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { + /** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ index_uuid: Uuid + /** This parameter must be set to true to import a dangling index. + * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ accept_data_loss: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } } export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase @@ -10619,6 +17637,10 @@ export interface DanglingIndicesListDanglingIndicesDanglingIndex { } export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface DanglingIndicesListDanglingIndicesResponse { @@ -10641,8 +17663,14 @@ export interface EnrichSummary { } export interface EnrichDeletePolicyRequest extends RequestBase { + /** Enrich policy to delete. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type EnrichDeletePolicyResponse = AcknowledgedResponseBase @@ -10655,9 +17683,16 @@ export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { } export interface EnrichExecutePolicyRequest extends RequestBase { + /** Enrich policy to execute. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** If `true`, the request blocks other enrich policy execution requests until complete. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never } } export interface EnrichExecutePolicyResponse { @@ -10666,8 +17701,15 @@ export interface EnrichExecutePolicyResponse { } export interface EnrichGetPolicyRequest extends RequestBase { + /** Comma-separated list of enrich policy names used to limit the request. 
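// ---------------------------------------------------------------------------
// Editor's sketch: the dangling-index APIs above are typically used together;
// list first, then import (or delete) by UUID with the explicit
// accept_data_loss acknowledgement. Assumes the standard danglingIndices
// namespace on the client and a response carrying a dangling_indices array.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function importAllDangling (): Promise<void> {
  const { dangling_indices: dangling } = await client.danglingIndices.listDanglingIndices()
  for (const index of dangling) {
    await client.danglingIndices.importDanglingIndex({
      index_uuid: index.index_uuid,
      accept_data_loss: true // required acknowledgement, as documented above
    })
  }
}
// ---------------------------------------------------------------------------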
+ * To return information for all enrich policies, omit this parameter. */ name?: Names + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export interface EnrichGetPolicyResponse { @@ -10675,11 +17717,20 @@ export interface EnrichGetPolicyResponse { } export interface EnrichPutPolicyRequest extends RequestBase { + /** Name of the enrich policy to create or update. */ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Matches enrich data to incoming documents based on a `geo_shape` query. */ geo_match?: EnrichPolicy + /** Matches enrich data to incoming documents based on a `term` query. */ match?: EnrichPolicy + /** Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. */ range?: EnrichPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never } } export type EnrichPutPolicyResponse = AcknowledgedResponseBase @@ -10709,94 +17760,171 @@ export interface EnrichStatsExecutingPolicy { } export interface EnrichStatsRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface EnrichStatsResponse { + /** Objects containing information about each coordinating ingest node for configured enrich processors. */ coordinator_stats: EnrichStatsCoordinatorStats[] + /** Objects containing information about each enrich policy that is currently executing. */ executing_policies: EnrichStatsExecutingPolicy[] + /** Objects containing information about the enrich cache stats on each ingest node. */ cache_stats?: EnrichStatsCacheStats[] } export interface EqlEqlHits { + /** Metadata about the number of matching events or sequences. */ total?: SearchTotalHits + /** Contains events matching the query. Each object represents a matching event. */ events?: EqlHitsEvent[] + /** Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ sequences?: EqlHitsSequence[] } export interface EqlEqlSearchResponseBase { + /** Identifier for the search. */ id?: Id + /** If true, the response does not contain complete search results. */ is_partial?: boolean + /** If true, the search request is still executing. */ is_running?: boolean + /** Milliseconds it took Elasticsearch to execute the request. */ took?: DurationValue + /** If true, the request timed out before completion. */ timed_out?: boolean + /** Contains matching events and sequences. Also contains related metadata. 
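// ---------------------------------------------------------------------------
// Editor's sketch: a match-type enrich policy created and executed through the
// EnrichPutPolicyRequest / EnrichExecutePolicyRequest shapes above. Index,
// field, and policy names are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function setUpEnrichPolicy (): Promise<void> {
  await client.enrich.putPolicy({
    name: 'users-policy',
    match: { // matches enrich data to incoming documents with a term query
      indices: 'users',
      match_field: 'email',
      enrich_fields: ['first_name', 'last_name']
    }
  })
  // Build the policy's enrich index; blocks until complete, per the docs above.
  await client.enrich.executePolicy({ name: 'users-policy', wait_for_completion: true })
}
// ---------------------------------------------------------------------------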
*/ hits: EqlEqlHits + /** Contains information about shard failures (if any), when `allow_partial_search_results` is `true` */ shard_failures?: ShardFailure[] } export interface EqlHitsEvent { + /** Name of the index containing the event. */ _index: IndexName + /** Unique identifier for the event. This ID is only unique within the index. */ _id: Id + /** Original JSON body passed for the event at index time. */ _source: TEvent + /** Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ missing?: boolean fields?: Record } export interface EqlHitsSequence { + /** Contains events matching the query. Each object represents a matching event. */ events: EqlHitsEvent[] + /** Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. */ join_keys?: any[] } export interface EqlDeleteRequest extends RequestBase { + /** Identifier for the search to delete. + * A search ID is provided in the EQL search API's response for an async search. + * A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { + /** Identifier for the search. */ id: Id + /** Period for which the search and its results are stored on the cluster. + * Defaults to the keep_alive value set by the search’s EQL search API request. */ keep_alive?: Duration + /** Timeout duration to wait for the request to finish. + * Defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } } export type EqlGetResponse = EqlEqlSearchResponseBase export interface EqlGetStatusRequest extends RequestBase { + /** Identifier for the search. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface EqlGetStatusResponse { + /** Identifier for the search. */ id: Id + /** If true, the response does not contain complete search results. This could be because either the search is still running (is_running status is true), or because it is already completed (is_running status is false) and results are partial due to failures or timeouts. */ is_partial: boolean + /** If true, the search request is still executing. If false, the search is completed. */ is_running: boolean + /** For a running search shows a timestamp when the eql search started, in milliseconds since the Unix epoch. */ start_time_in_millis?: EpochTime + /** Shows a timestamp when the eql search will be expired, in milliseconds since the Unix epoch.
When this time is reached, the search and its results are deleted, even if the search is still ongoing. */ expiration_time_in_millis?: EpochTime + /** For a completed search shows the http status code of the completed search. */ completion_status?: integer } export interface EqlSearchRequest extends RequestBase { + /** The name of the index to scope the operation */ index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution */ ccs_minimize_roundtrips?: boolean + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** EQL query you wish to run. */ query: string case_sensitive?: boolean + /** Field containing the event classification, such as process, file, or network. */ event_category_field?: Field + /** Field used to sort hits with the same timestamp in ascending order */ tiebreaker_field?: Field + /** Field containing event timestamp. Default "@timestamp" */ timestamp_field?: Field + /** Maximum number of events to search at a time for sequence queries. */ fetch_size?: uint + /** Query, written in Query DSL, used to filter the events on which the EQL query runs. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] keep_alive?: Duration keep_on_completion?: boolean wait_for_completion_timeout?: Duration + /** Allow query execution also in case of shard failures. + * If true, the query will keep running and will return results based on the available shards. + * For sequences, the behavior can be further refined using allow_partial_sequence_results */ allow_partial_search_results?: boolean + /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. + * If true, the sequence query will return results based on the available shards, ignoring the others. + * If false, the sequence query will return successfully, but will always have empty results. */ allow_partial_sequence_results?: boolean + /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ size?: uint + /** Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. */ fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition runtime_mappings?: MappingRuntimeFields + /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` + * parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the + * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer + /** All values in `body` will be added to the request body. 
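// ---------------------------------------------------------------------------
// Editor's sketch: an async-friendly EQL search using the EqlSearchRequest
// fields above, then polling the status endpoint. The index name and query are
// placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function runEqlSearch (): Promise<void> {
  const search = await client.eql.search({
    index: 'my-data-stream',
    query: 'process where process.name == "regsvr32.exe"',
    wait_for_completion_timeout: '2s', // return a search id if not done in time
    keep_on_completion: true // keep results so they can be fetched later by id
  })
  if (search.id != null) {
    const status = await client.eql.getStatus({ id: search.id })
    console.log(status.is_running, status.completion_status)
  }
}
// ---------------------------------------------------------------------------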
+ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } } export type EqlSearchResponse = EqlEqlSearchResponseBase @@ -10804,7 +17932,13 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase> + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. */ include_ccs_metadata?: boolean + /** The period to wait for the request to finish. + * By default, the request waits for 1 second for the query results. + * If the query completes during this period, results are returned. + * Otherwise, a query ID is returned that can later be used to retrieve the results. */ wait_for_completion_timeout?: Duration + /** The period for which the query and its results are stored in the cluster. + * The default period is five days. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. + * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ keep_alive?: Duration + /** Indicates whether the query and its results are stored in the cluster. + * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } } export type EsqlAsyncQueryResponse = EsqlEsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase export interface EsqlAsyncQueryGetRequest extends RequestBase { + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean + /** A short version of the Accept header, for example `json` or `yaml`. */ format?: EsqlQueryEsqlFormat + /** The period for which the query and its results are stored in the cluster. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ keep_alive?: Duration + /** The period to wait for the request to finish. + * By default, the request waits for complete query results. + * If the request completes during the period specified in this parameter, complete query results are returned. + * Otherwise, the response returns an `is_running` value of `true` and no results. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } } export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult export interface EsqlAsyncQueryStopRequest extends RequestBase { + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. 
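A minimal sketch of the async ES|QL lifecycle these request types describe, assuming the camelCase helpers (`asyncQuery`, `asyncQueryGet`) the client generates; the query and data stream name are hypothetical, and the cast reflects the assumption that the submit response carries an `id` while the query is still running.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function runAsyncEsql (): Promise<void> {
  // Submit; execution continues in the background if it exceeds 1 second
  const submitted = await client.esql.asyncQuery({
    query: 'FROM my-logs | STATS hits = COUNT(*) BY status', // hypothetical
    wait_for_completion_timeout: '1s',
    keep_on_completion: true // store results even if the query finishes early
  })
  // An ID is returned when the query did not complete in the designated time
  const { id } = submitted as { id?: string }
  if (id != null) {
    const result = await client.esql.asyncQueryGet({ id })
    console.log(result.columns, result.values)
  }
}

runAsyncEsql().catch(console.error)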
+ * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never } } export type EsqlAsyncQueryStopResponse = EsqlEsqlResult @@ -10918,18 +18129,45 @@ export type EsqlAsyncQueryStopResponse = EsqlEsqlResult export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' export interface EsqlQueryRequest extends RequestBase { + /** A short version of the Accept header, e.g. json, yaml. + * + * `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. */ format?: EsqlQueryEsqlFormat + /** The character to use between values within a CSV row. Only valid for the CSV format. */ delimiter?: string + /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? + * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ drop_null_columns?: boolean + /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + * If `false`, the query will fail if there are any failures. + * + * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */ allow_partial_results?: boolean + /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean + /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ filter?: QueryDslQueryContainer locale?: string + /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[] + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ profile?: boolean + /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. */ tables?: Record> + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. */ include_ccs_metadata?: boolean + /** All values in `body` will be added to the request body. 
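The synchronous EsqlQueryRequest above can be exercised the same way; this sketch uses the question-mark placeholders recommended by the `params` documentation (the index name and value are hypothetical).

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function runEsql (): Promise<void> {
  const result = await client.esql.query({
    // `?` placeholders keep user-supplied values out of the query string
    query: 'FROM my-logs | WHERE status == ? | LIMIT 10',
    params: [500],
    columnar: false // one row per document (the default)
  })
  for (const row of result.values) console.log(row)
}

runEsql().catch(console.error)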
*/ + body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } export type EsqlQueryResponse = EsqlEsqlResult @@ -10940,7 +18178,12 @@ export interface FeaturesFeature { } export interface FeaturesGetFeaturesRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface FeaturesGetFeaturesResponse { @@ -10948,7 +18191,12 @@ export interface FeaturesGetFeaturesResponse { } export interface FeaturesResetFeaturesRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface FeaturesResetFeaturesResponse { @@ -10958,11 +18206,24 @@ export interface FeaturesResetFeaturesResponse { } export type FleetCheckpoint = long export interface FleetGlobalCheckpointsRequest extends RequestBase { + /** A single index or index alias that resolves to a single index. */ index: IndexName | IndexAlias + /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints + * to advance past the provided `checkpoints`. */ wait_for_advance?: boolean + /** A boolean value which controls whether to wait (until the timeout) for the target index to exist + * and all primary shards to be active. Can only be true when `wait_for_advance` is true. */ wait_for_index?: boolean + /** A comma-separated list of previous global checkpoints. When used in combination with `wait_for_advance`, + * the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list + * will cause Elasticsearch to immediately return the current global checkpoints. */ checkpoints?: FleetCheckpoint[] + /** Period to wait for global checkpoints to advance past `checkpoints`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
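A sketch of FleetGlobalCheckpointsRequest in use; the Fleet APIs are designed for Fleet server, so the index and checkpoint values here are purely illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function waitForCheckpoints (): Promise<void> {
  // Wait (up to the timeout) for the global checkpoints to advance past 0
  const response = await client.fleet.globalCheckpoints({
    index: '.fleet-actions', // hypothetical; must resolve to a single index
    wait_for_advance: true,
    wait_for_index: true,
    checkpoints: [0],
    timeout: '30s'
  })
  console.log(response.global_checkpoints)
}

waitForCheckpoints().catch(console.error)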
*/ + querystring?: { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never } } export interface FleetGlobalCheckpointsResponse { @@ -10971,21 +18232,43 @@ export interface FleetGlobalCheckpointsResponse { } export interface FleetMsearchRequest extends RequestBase { + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index?: IndexName | IndexAlias + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Maximum number of concurrent searches the multi search API can execute. */ max_concurrent_searches?: long + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ max_concurrent_shard_requests?: long + /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long + /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ search_type?: SearchType + /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ rest_total_hits_as_int?: boolean + /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */ allow_partial_search_results?: boolean searches?: MsearchRequestItem[] + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never } } export interface FleetMsearchResponse { @@ -10993,6 +18276,7 @@ export interface FleetMsearchResponse { } export interface FleetSearchRequest extends RequestBase { + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index: IndexName | IndexAlias allow_no_indices?: boolean analyzer?: string @@ -11013,50 +18297,106 @@ export interface FleetSearchRequest extends RequestBase { routing?: Routing scroll?: Duration search_type?: SearchType + /** Specifies which field to use for suggestions. */ suggest_field?: Field suggest_mode?: SuggestMode suggest_size?: long + /** The source text for which the suggestions should be returned. */ suggest_text?: string typed_keys?: boolean rest_total_hits_as_int?: boolean _source_excludes?: Fields _source_includes?: Fields q?: string + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] + /** If true, returns partial results if there are shard request timeouts or shard failures. If false, returns + * an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + * which is true by default. */ allow_partial_search_results?: boolean aggregations?: Record /** @alias aggregations */ aggs?: Record collapse?: SearchFieldCollapse + /** If true, returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits + /** Boosts the _score of documents from specified indices. */ indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. 
*/ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean + /** Defines the search definition using the Query DSL. */ query?: QueryDslQueryContainer rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If true, returns document version as part of a hit. */ version?: boolean + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an `<index>` in the request path. */ pit?: SearchPointInTimeReference + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] + /** All values in `body` will be added to the request body.
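A sketch of FleetSearchRequest in use, combining `wait_for_checkpoints` with a regular Query DSL body; like the other Fleet APIs this targets Fleet server use cases, and the index, checkpoint, and field names are assumptions.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function fleetSearch (): Promise<void> {
  const result = await client.fleet.search({
    index: '.fleet-actions', // must resolve to a single index
    wait_for_checkpoints: [5], // run only once checkpoint 5 is visible
    allow_partial_search_results: false, // fail instead of returning partial hits
    query: { term: { agent_id: 'abc123' } } // hypothetical field
  })
  console.log(result.hits.hits)
}

fleetSearch().catch(console.error)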
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, min_compatible_shard_node?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, min_compatible_shard_node?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } } export interface FleetSearchResponse { @@ -11084,15 +18424,28 @@ export interface GraphConnection { } export interface GraphExploreControls { + /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. + * You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */ sample_diversity?: GraphSampleDiversity + /** Each hop considers a sample of the best-matching documents on each shard. + * Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms. 
+ * Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms. + * Very large sample sizes can dilute the quality of the results and increase execution times. */ sample_size?: integer + /** The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned. + * This timeout is honored on a best-effort basis. + * Execution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field. */ timeout?: Duration + /** Filters associated terms so only those that are significantly associated with your query are included. */ use_significance: boolean } export interface GraphHop { + /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop + /** An optional guiding query that constrains the Graph API as it explores connected terms. */ query: QueryDslQueryContainer + /** Contains the fields you are interested in. */ vertices: GraphVertexDefinition[] } @@ -11109,11 +18462,18 @@ export interface GraphVertex { } export interface GraphVertexDefinition { + /** Prevents the specified terms from being included in the results. */ exclude?: string[] + /** Identifies a field in the documents of interest. */ field: Field + /** Identifies the terms of interest that form the starting points from which you want to spider out. */ include?: GraphVertexInclude[] + /** Specifies how many documents must contain a pair of terms before it is considered to be a useful connection. + * This setting acts as a certainty threshold. */ min_doc_count?: long + /** Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */ shard_min_doc_count?: long + /** Specifies the maximum number of vertex terms returned for each field. */ size?: integer } @@ -11123,13 +18483,26 @@ export interface GraphVertexInclude { } export interface GraphExploreRequest extends RequestBase { + /** Name of the index. */ index: Indices + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** Specifies the period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: Duration + /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop + /** Direct the Graph API how to build the graph. */ controls?: GraphExploreControls + /** A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. */ query?: QueryDslQueryContainer + /** Specifies one or more fields that contain the terms you want to include in the graph as vertices. */ vertices?: GraphVertexDefinition[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never } + /** All values in `querystring` will be added to the request querystring.
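Pulling the Graph types above together, a hedged sketch of a `client.graph.explore` call; the index, seed query, and vertex field are hypothetical.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function exploreGraph (): Promise<void> {
  const response = await client.graph.explore({
    index: 'clicklogs', // hypothetical index
    query: { match: { description: 'midi' } }, // seed query for documents of interest
    vertices: [{ field: 'product' }], // terms to spider out from
    controls: {
      use_significance: true, // keep only significantly associated terms
      sample_size: 2000 // best-matching documents sampled per shard, per hop
    }
  })
  console.log(response.vertices, response.connections)
}

exploreGraph().catch(console.error)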
*/ + querystring?: { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never } } export interface GraphExploreResponse { @@ -11141,18 +18514,31 @@ export interface GraphExploreResponse { } export interface IlmActions { + /** Phases allowed: warm, cold. */ allocate?: IlmAllocateAction + /** Phases allowed: delete. */ delete?: IlmDeleteAction + /** Phases allowed: hot, warm, cold. */ downsample?: IlmDownsampleAction + /** The freeze action is a noop in 8.x. */ freeze?: EmptyObject + /** Phases allowed: hot, warm. */ forcemerge?: IlmForceMergeAction + /** Phases allowed: warm, cold. */ migrate?: IlmMigrateAction + /** Phases allowed: hot, warm, cold. */ readonly?: EmptyObject + /** Phases allowed: hot. */ rollover?: IlmRolloverAction + /** Phases allowed: hot, warm, cold. */ set_priority?: IlmSetPriorityAction + /** Phases allowed: hot, cold, frozen. */ searchable_snapshot?: IlmSearchableSnapshotAction + /** Phases allowed: hot, warm. */ shrink?: IlmShrinkAction + /** Phases allowed: hot, warm, cold, frozen. */ unfollow?: EmptyObject + /** Phases allowed: delete. */ wait_for_snapshot?: IlmWaitForSnapshotAction } @@ -11197,6 +18583,7 @@ export interface IlmPhases { export interface IlmPolicy { phases: IlmPhases + /** Arbitrary metadata that is not automatically generated or used by Elasticsearch. */ _meta?: Metadata } @@ -11233,9 +18620,16 @@ export interface IlmWaitForSnapshotAction { } export interface IlmDeleteLifecycleRequest extends RequestBase { + /** Identifier for the policy. */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase @@ -11286,10 +18680,19 @@ export interface IlmExplainLifecycleLifecycleExplainUnmanaged { } export interface IlmExplainLifecycleRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). + * To target all data streams and indices, use `*` or `_all`. */ index: IndexName + /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy or attempting to use a policy that does not exist. */ only_errors?: boolean + /** Filters the returned indices to only indices that are managed by ILM. */ only_managed?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring.
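The IlmPolicy and IlmActions shapes above translate into a `client.ilm.putLifecycle` call along these lines; the policy name, phases, and thresholds are illustrative, not prescriptive.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function createPolicy (): Promise<void> {
  await client.ilm.putLifecycle({
    name: 'logs-policy', // hypothetical policy name
    policy: {
      phases: {
        hot: {
          // rollover is allowed in the hot phase, per the comments above
          actions: { rollover: { max_age: '7d', max_primary_shard_size: '50gb' } }
        },
        delete: {
          min_age: '30d', // enter the delete phase 30 days after rollover
          actions: { delete: {} }
        }
      }
    }
  })
}

createPolicy().catch(console.error)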
*/ + querystring?: { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never } } export interface IlmExplainLifecycleResponse { @@ -11303,14 +18706,25 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { + /** Identifier for the policy. */ name?: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IlmGetLifecycleResponse = Record export interface IlmGetStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IlmGetStatusResponse { @@ -11318,46 +18732,80 @@ export interface IlmGetStatusResponse { } export interface IlmMigrateToDataTiersRequest extends RequestBase { + /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. + * This provides a way to retrieve the indices and ILM policies that need to be migrated. */ dry_run?: boolean legacy_template_to_delete?: string node_attribute?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { dry_run?: never, legacy_template_to_delete?: never, node_attribute?: never } } export interface IlmMigrateToDataTiersResponse { dry_run: boolean + /** The name of the legacy index template that was deleted. + * This information is missing if no legacy index templates were deleted. */ removed_legacy_template: string + /** The ILM policies that were updated. */ migrated_ilm_policies: string[] + /** The indices that were migrated to tier preference routing. */ migrated_indices: Indices + /** The legacy index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_legacy_templates: string[] + /** The composable index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_composable_templates: string[] + /** The component templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_component_templates: string[] } export interface IlmMoveToStepRequest extends RequestBase { + /** The name of the index whose lifecycle step is to change */ index: IndexName + /** The step that the index is expected to be in. */ current_step: IlmMoveToStepStepKey + /** The step that you want to run. */ next_step: IlmMoveToStepStepKey + /** All values in `body` will be added to the request body. 
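IlmMoveToStepRequest requires both the expected current step and the target step, so the request fails fast if the index has already moved on; a sketch with hypothetical index and step values.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function advanceIndex (): Promise<void> {
  await client.ilm.moveToStep({
    index: 'my-index-000001', // hypothetical index
    current_step: { phase: 'new', action: 'complete', name: 'complete' },
    next_step: { phase: 'warm', action: 'forcemerge', name: 'forcemerge' }
  })
}

advanceIndex().catch(console.error)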
*/ + body?: string | { [key: string]: any } & { index?: never, current_step?: never, next_step?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, current_step?: never, next_step?: never } } export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { + /** The optional action to which the index will be moved. */ action?: string + /** The optional step name to which the index will be moved. */ name?: string phase: string } export interface IlmPutLifecycleRequest extends RequestBase { + /** Identifier for the policy. */ name: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration policy?: IlmPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } } export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { + /** The name of the index from which to remove the policy. */ index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export interface IlmRemovePolicyResponse { @@ -11366,40 +18814,75 @@ export interface IlmRemovePolicyResponse { } export interface IlmRetryRequest extends RequestBase { + /** The names of the indices (comma-separated) whose failed lifecycle step is to be retried. */ index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type IlmStopResponse = AcknowledgedResponseBase export interface IndicesAlias { + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: Routing + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. */ search_routing?: Routing } export interface IndicesAliasDefinition { + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: string + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: string + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. */ search_routing?: string + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean } @@ -11408,40 +18891,76 @@ export interface IndicesCacheQueries { } export interface IndicesDataStream { + /** Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template. + * If empty, the response omits this property. */ _meta?: Metadata + /** If `true`, the data stream allows custom routing on write requests. */ allow_custom_routing?: boolean + /** Information about failure store backing indices. */ failure_store?: IndicesFailureStore + /** Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */ generation: integer + /** If `true`, the data stream is hidden. */ hidden: boolean + /** Name of the current ILM lifecycle policy in the stream’s matching index template. + * This lifecycle policy is set in the `index.lifecycle.name` setting. + * If the template does not include a lifecycle policy, this property is not included in the response. + * NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */ ilm_policy?: Name + /** Name of the lifecycle system that'll manage the next generation of the data stream. */ next_generation_managed_by: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to manage this data stream.
*/ prefer_ilm: boolean + /** Array of objects containing information about the data stream’s backing indices. + * The last item in this array contains information about the stream’s current write index. */ indices: IndicesDataStreamIndex[] + /** Contains the configuration for the data stream lifecycle of this data stream. */ lifecycle?: IndicesDataStreamLifecycleWithRollover + /** Name of the data stream. */ name: DataStreamName + /** If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings. */ replicated?: boolean + /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */ rollover_on_write: boolean + /** Health status of the data stream. + * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ status: HealthStatus + /** If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */ system?: boolean + /** Name of the index template used to create the data stream’s backing indices. + * The template’s index pattern must match the name of this data stream. */ template: Name + /** Information about the `@timestamp` field in the data stream. */ timestamp_field: IndicesDataStreamTimestampField } export interface IndicesDataStreamIndex { + /** Name of the backing index. */ index_name: IndexName + /** Universally unique identifier (UUID) for the index. */ index_uuid: Uuid + /** Name of the current ILM lifecycle policy configured for this backing index. */ ilm_policy?: Name + /** Name of the lifecycle system that's currently managing this backing index. */ managed_by?: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */ prefer_ilm?: boolean } export interface IndicesDataStreamLifecycle { + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean } export interface IndicesDataStreamLifecycleDownsampling { + /** The list of downsampling rounds to execute as part of this downsampling configuration */ rounds: IndicesDownsamplingRound[] } @@ -11459,10 +18978,14 @@ export interface IndicesDataStreamLifecycleRolloverConditions { } export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { + /** The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`. + * This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true. + * The contents of this field are subject to change. 
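The data stream lifecycle and downsampling shapes above correspond to a `client.indices.putDataLifecycle` call roughly like this; the stream name, retention period, and rounds are assumptions.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function configureLifecycle (): Promise<void> {
  await client.indices.putDataLifecycle({
    name: 'my-data-stream', // hypothetical data stream
    data_retention: '7d', // keep documents for at least 7 days
    downsampling: {
      // one day after rollover, downsample the backing index to 1h buckets
      rounds: [{ after: '1d', config: { fixed_interval: '1h' } }]
    }
  })
}

configureLifecycle().catch(console.error)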
*/ rollover?: IndicesDataStreamLifecycleRolloverConditions } export interface IndicesDataStreamTimestampField { + /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ name: Field } @@ -11472,11 +18995,14 @@ export interface IndicesDataStreamVisibility { } export interface IndicesDownsampleConfig { + /** The interval at which to aggregate the original time series index. */ fixed_interval: DurationLarge } export interface IndicesDownsamplingRound { + /** The duration since rollover when this downsampling round should execute. */ after: Duration + /** The downsample configuration to execute. */ config: IndicesDownsampleConfig } @@ -11548,7 +19074,9 @@ export interface IndicesIndexSettingsKeys { routing_path?: string | string[] soft_deletes?: IndicesSoftDeletes sort?: IndicesIndexSegmentSort + /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_shards?: integer | string + /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_replicas?: integer | string number_of_routing_shards?: integer check_on_startup?: IndicesIndexCheckOnStartup @@ -11569,6 +19097,7 @@ export interface IndicesIndexSettingsKeys { max_shingle_diff?: integer blocks?: IndicesIndexSettingBlocks max_refresh_listeners?: integer + /** Settings to define analyzers, tokenizers, token filters and character filters. */ analyze?: IndicesSettingsAnalyze highlight?: IndicesSettingsHighlight max_terms_count?: integer @@ -11594,10 +19123,14 @@ export interface IndicesIndexSettingsKeys { settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries queries?: IndicesQueries + /** Configure custom similarity settings to customize how search results are scored. */ similarity?: Record + /** Enable or disable dynamic mapping for an index. */ mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesIndexingSlowlogSettings + /** Configure indexing back pressure limits. */ indexing_pressure?: IndicesIndexingPressure + /** The store module allows you to control how index data is stored and accessed on disk. */ store?: IndicesStorage } export type IndicesIndexSettings = IndicesIndexSettingsKeys @@ -11612,16 +19145,33 @@ export interface IndicesIndexSettingsAnalysis { } export interface IndicesIndexSettingsLifecycle { + /** The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */ name?: Name + /** Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action. + * You can explicitly set it to skip rollover. */ indexing_complete?: SpecUtilsStringified + /** If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting + * if you create a new index that contains old data and want to use the original creation date to calculate the index + * age. Specified as a Unix epoch value in milliseconds. */ origination_date?: long + /** Set to true to parse the origination date from the index name. This origination date is used to calculate the index age + * for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is + * yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format, + * for example logs-2016.10.31-000002.
If the index name doesn’t match the pattern, index creation fails. */ parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep + /** The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. + * When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more + * information about rolling indices, see Rollover. */ rollover_alias?: string + /** Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are + * applicable for an index). */ prefer_ilm?: boolean | string } export interface IndicesIndexSettingsLifecycleStep { + /** Time to wait for the cluster to resolve allocation issues during an ILM shrink action. Must be greater than 1h (1 hour). + * See Shard allocation for shrink. */ wait_time_threshold?: Duration } @@ -11634,32 +19184,63 @@ export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + /** Default settings, included when the request's `include_default` is `true`. */ defaults?: IndicesIndexSettings data_stream?: DataStreamName + /** Data stream lifecycle applicable if this is a data stream. */ lifecycle?: IndicesDataStreamLifecycle } export interface IndicesIndexTemplate { + /** Index patterns used to match the names of data streams and indices during creation. */ index_patterns: Names + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesIndexTemplateSummary + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long + /** Optional user metadata about the index template. May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata allow_auto_create?: boolean + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesIndexTemplateDataStreamConfiguration + /** Marks this index template as deprecated. + * When creating or updating a non-deprecated index template that uses deprecated components, + * Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** A list of component template names that are allowed to be absent. */ ignore_missing_component_templates?: Names } export interface IndicesIndexTemplateDataStreamConfiguration { + /** If true, the data stream is hidden. */ hidden?: boolean + /** If true, the data stream supports custom routing. */ allow_custom_routing?: boolean } export interface IndicesIndexTemplateSummary { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases.
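A sketch of the IndicesIndexTemplate shape above as a `client.indices.putIndexTemplate` call; the template name, pattern, and mapping are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function createTemplate (): Promise<void> {
  await client.indices.putIndexTemplate({
    name: 'logs-template', // hypothetical template name
    index_patterns: ['logs-*'],
    data_stream: {}, // matching names create data streams, not plain indices
    priority: 200, // highest-priority matching template wins
    template: {
      settings: { number_of_shards: 1 },
      mappings: { properties: { '@timestamp': { type: 'date' } } }
    }
  })
}

createTemplate().catch(console.error)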
+ * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycleWithRollover } @@ -11674,6 +19255,9 @@ export interface IndicesIndexingPressure { } export interface IndicesIndexingPressureMemory { + /** Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded, + * the node will reject new coordinating and primary operations. When replica operations consume 1.5x this limit, + * the node will reject new replica operations. Defaults to 10% of the heap. */ limit?: integer } @@ -11685,6 +19269,8 @@ export interface IndicesIndexingSlowlogSettings { } export interface IndicesIndexingSlowlogTresholds { + /** The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`. + * The log and its thresholds are configured in the same way as the search slow log. */ index?: IndicesSlowlogTresholdLevels } @@ -11703,22 +19289,34 @@ export interface IndicesMappingLimitSettings { } export interface IndicesMappingLimitSettingsDepth { + /** The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined + * at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */ limit?: long } export interface IndicesMappingLimitSettingsDimensionFields { + /** [preview] This functionality is in technical preview and may be changed or removed in a future release. + * Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */ limit?: long } export interface IndicesMappingLimitSettingsFieldNameLength { + /** Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but + * might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The + * default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). */ limit?: long } export interface IndicesMappingLimitSettingsNestedFields { + /** The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when + * arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this + * setting limits the number of unique nested types per index. */ limit?: long } export interface IndicesMappingLimitSettingsNestedObjects { + /** The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps + * to prevent out of memory errors when a document contains too many nested objects. */ limit?: long } @@ -11727,7 +19325,15 @@ export interface IndicesMappingLimitSettingsSourceFields { } export interface IndicesMappingLimitSettingsTotalFields { + /** The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit. + * The limit is in place to prevent mappings and searches from becoming too large.
Higher values can lead to performance + * degradations and memory issues, especially in clusters with a high load or few resources. */ limit?: long | string + /** This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set + * to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail + * with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail. + * Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false. + * The fields that were not added to the mapping will be added to the _ignored field. */ ignore_dynamic_beyond_limit?: boolean | string } @@ -11849,7 +19455,12 @@ export interface IndicesSlowlogTresholds { } export interface IndicesSoftDeletes { + /** Indicates whether soft deletes are enabled on the index. */ enabled?: boolean + /** The maximum period to retain a shard history retention lease before it is considered expired. + * Shard history retention leases ensure that soft deletes are retained during merges on the Lucene + * index. If a soft delete is merged away before it can be replicated to a follower the following + * process will fail due to incomplete history on the leader. */ retention_lease?: IndicesRetentionLease } @@ -11857,7 +19468,12 @@ export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' export interface IndicesStorage { type: IndicesStorageType + /** You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap. + * This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. This + * setting is useful, for example, if you are in an environment where you cannot control the ability to create a lot + * of memory maps so you need to disable the ability to use memory-mapping. */ allow_mmap?: boolean + /** How often store statistics are refreshed. */ stats_refresh_interval?: Duration } @@ -11873,8 +19489,16 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { + /** How often the translog is fsynced to disk and committed, regardless of write operations. + * Values less than 100ms are not allowed. */ sync_interval?: Duration + /** Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */ durability?: IndicesTranslogDurability + /** The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not + * part of a Lucene commit point). Although these operations are available for reads, they will need + * to be replayed if the shard was stopped and had to be recovered. This setting controls the + * maximum total size of these operations, to prevent recoveries from taking too long. Once the + * maximum size has been reached a flush will happen, generating a new Lucene commit point. */ flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention } @@ -11882,7 +19506,17 @@ export interface IndicesTranslog { export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC' export interface IndicesTranslogRetention { + /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases + * the chance of performing an operation based sync when recovering a replica. If the translog files are not + * sufficient, replica recovery will fall back to a file based sync.
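The translog settings documented above are index settings, so they can be applied with `client.indices.putSettings`; a sketch assuming the flattened setting keys, with a hypothetical index.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function tuneTranslog (): Promise<void> {
  // Trade a small durability window for indexing throughput: fsync in the
  // background every 5s instead of on every request
  await client.indices.putSettings({
    index: 'my-index', // hypothetical index
    settings: {
      'index.translog.durability': 'async',
      'index.translog.sync_interval': '5s'
    }
  })
}

tuneTranslog().catch(console.error)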
export interface IndicesTranslogRetention { + /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases + * the chance of performing an operation based sync when recovering a replica. If the translog files are not + * sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be + * set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch + * versions 7.0.0 and later. */ size?: ByteSize + /** This controls the maximum duration for which translog files are kept by each shard. Keeping more + * translog files increases the chance of performing an operation based sync when recovering replicas. If + * the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting + * is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in + * indices created in Elasticsearch versions 7.0.0 and later. */ age?: Duration } @@ -11894,13 +19528,35 @@ export interface IndicesAddBlockIndicesBlockStatus { } export interface IndicesAddBlockRequest extends RequestBase { + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are adding blocks to. + * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName + /** The block type to add to the index. */ block: IndicesAddBlockIndicesBlockOptions + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } } export interface IndicesAddBlockResponse { @@ -11951,16 +19607,36 @@ export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeToke & { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { + /** Index used to derive the analyzer. + * If specified, the `analyzer` or field parameter overrides this value. + * If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ index?: IndexName + /** The name of the analyzer that should be applied to the provided `text`. + * This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ analyzer?: string + /** Array of token attributes used to filter the output of the `explain` parameter. */ attributes?: string[] + /** Array of character filters used to preprocess characters before the tokenizer. */ char_filter?: AnalysisCharFilter[] + /** If `true`, the response includes token attributes and additional details. */ explain?: boolean + /** Field used to derive the analyzer. + * To use this parameter, you must specify an index. + * If specified, the `analyzer` parameter overrides this value. */ field?: Field + /** Array of token filters used to apply after the tokenizer. */ filter?: AnalysisTokenFilter[] + /** Normalizer to use to convert text into a single token. */ normalizer?: string + /** Text to analyze. + * If an array of strings is provided, it is analyzed as a multi-value field. */ text?: IndicesAnalyzeTextToAnalyze + /** Tokenizer to use to convert text into tokens. */ tokenizer?: AnalysisTokenizer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } } export interface IndicesAnalyzeResponse { @@ -11976,32 +19652,69 @@ export interface IndicesAnalyzeTokenDetail { } export interface IndicesCancelMigrateReindexRequest extends RequestBase { + /** The index or data stream name */ index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase export interface IndicesClearCacheRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. 
*/ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, clears the fields cache. + * Use the `fields` parameter to clear the cache of specific fields only. */ fielddata?: boolean + /** Comma-separated list of field names used to limit the `fielddata` parameter. */ fields?: Fields + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, clears the query cache. */ query?: boolean + /** If `true`, clears the request cache. */ request?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } } export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { + /** Name of the source index to clone. */ index: IndexName + /** Name of the target index to create. */ target: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** Aliases for the resulting index. */ aliases?: Record + /** Configuration options for the target index. */ settings?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } } export interface IndicesCloneResponse { @@ -12020,13 +19733,30 @@ export interface IndicesCloseCloseShardResult { } export interface IndicesCloseRequest extends RequestBase { + /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
+ * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } } export interface IndicesCloseResponse { @@ -12036,13 +19766,30 @@ export interface IndicesCloseResponse { } export interface IndicesCreateRequest extends RequestBase { + /** Name of the index you wish to create. */ index: IndexName + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** Aliases for the index. */ aliases?: Record + /** Mapping for fields in the index. If specified, this mapping can include: + * - Field names + * - Field data types + * - Mapping parameters */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never } } export interface IndicesCreateResponse { @@ -12052,23 +19799,44 @@ export interface IndicesCreateResponse { } export interface IndicesCreateDataStreamRequest extends RequestBase { + /** Name of the data stream, which must meet the following criteria: + * Lowercase only; + * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; + * Cannot start with `-`, `_`, `+`, or `.ds-`; + * Cannot be `.` or `..`; + * Cannot be longer than 255 bytes. 
Multi-byte characters count towards this limit faster. */ name: DataStreamName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesCreateFromCreateFrom { + /** Mappings overrides to be applied to the destination index (optional) */ mappings_override?: MappingTypeMapping + /** Settings overrides to be applied to the destination index (optional) */ settings_override?: IndicesIndexSettings + /** If index blocks should be removed when creating destination index (optional) */ remove_index_blocks?: boolean } export interface IndicesCreateFromRequest extends RequestBase { + /** The source index or data stream name */ source: IndexName + /** The destination index or data stream name */ dest: IndexName create_from?: IndicesCreateFromCreateFrom + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { source?: never, dest?: never, create_from?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { source?: never, dest?: never, create_from?: never } } export interface IndicesCreateFromResponse { @@ -12078,34 +19846,78 @@ export interface IndicesCreateFromResponse { } export interface IndicesDataStreamsStatsDataStreamsStatsItem { + /** Current number of backing indices for the data stream. */ backing_indices: integer + /** Name of the data stream. */ data_stream: Name + /** The data stream’s highest `@timestamp` value, converted to milliseconds since the Unix epoch. + * NOTE: This timestamp is provided as a best effort. + * The data stream may contain `@timestamp` values higher than this if one or more of the following conditions are met: + * The stream contains closed backing indices; + * Backing indices with a lower generation contain higher `@timestamp` values. */ maximum_timestamp: EpochTime + /** Total size of all shards for the data stream’s backing indices. + * This parameter is only returned if the `human` query parameter is `true`. */ store_size?: ByteSize + /** Total size, in bytes, of all shards for the data stream’s backing indices. */ store_size_bytes: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Wildcard expressions (`*`) are supported. + * To target all data streams in a cluster, omit this parameter or use `*`. */ name?: IndexName + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never } } export interface IndicesDataStreamsStatsResponse { + /** Contains information about shards that attempted to execute the request. */ _shards: ShardStatistics + /** Total number of backing indices for the selected data streams. */ backing_indices: integer + /** Total number of selected data streams. */ data_stream_count: integer + /** Contains statistics for the selected data streams. */ data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] + /** Total size of all shards for the selected data streams. + * This property is included only if the `human` query parameter is `true`. */ total_store_sizes?: ByteSize + /** Total size, in bytes, of all shards for the selected data streams. */ total_store_size_bytes: long } export interface IndicesDeleteRequest extends RequestBase { + /** Comma-separated list of indices to delete. + * You cannot specify index aliases. + * By default, this parameter does not support wildcards (`*`) or `_all`. + * To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteResponse = IndicesResponseBase
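Since `IndicesDeleteResponse` is just `IndicesResponseBase`, the delete call resolves to an acknowledged-style object. A short sketch; the index name is a placeholder, and `ignore_unavailable` keeps the call from failing when the index is already gone:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Deletes a concrete index; index aliases are rejected by this API.
const resp = await client.indices.delete({
  index: 'my-old-index',
  ignore_unavailable: true
})
console.log(resp.acknowledged)
```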
@@ -12115,103 +19927,217 @@ } export interface IndicesDeleteAliasIndicesAliasesResponseBody extends AcknowledgedResponseBase { } export interface IndicesDeleteAliasRequest extends RequestBase { + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). */ index: Indices + /** Comma-separated list of aliases to remove. + * Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ name: Names + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody export interface IndicesDeleteDataLifecycleRequest extends RequestBase { + /** A comma-separated list of data streams whose data stream lifecycle will be deleted; use `*` to target all data streams */ name: DataStreamNames + /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards + /** Specify timeout for connection to master */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { + /** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ name: DataStreamNames + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never } } export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { + /** The name of the legacy index template to delete. + * Wildcard (`*`) expressions are supported.
*/ name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the API performs a flush before analysis. + * If `false`, the response may not include uncommitted data. */ flush?: boolean + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Analyzing field disk usage is resource-intensive. + * To use the API, this parameter must be set to `true`. */ run_expensive_tasks?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } } export type IndicesDiskUsageResponse = any export interface IndicesDownsampleRequest extends RequestBase { + /** Name of the time series index to downsample. */ index: IndexName + /** Name of the index to create. */ target_index: IndexName config?: IndicesDownsampleConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target_index?: never, config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target_index?: never, config?: never } } export type IndicesDownsampleResponse = any export interface IndicesExistsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). 
*/ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } } export type IndicesExistsResponse = boolean export interface IndicesExistsAliasRequest extends RequestBase { + /** Comma-separated list of aliases to check. Supports wildcards (`*`). */ name: Names + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never } } export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name + /** If true, the request retrieves information from the local node only. 
Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } } export type IndicesExistsIndexTemplateResponse = boolean export interface IndicesExistsTemplateRequest extends RequestBase { + /** A comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name: Names + /** Indicates whether to use a flat format for the response. */ flat_settings?: boolean + /** Indicates whether to get information from the local node only. */ local?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } } export type IndicesExistsTemplateResponse = boolean @@ -12229,9 +20155,16 @@ export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { } export interface IndicesExplainDataLifecycleRequest extends RequestBase { + /** The name of the index to explain */ index: Indices + /** indicates if the API should return the default values the system uses for the index's lifecycle */ include_defaults?: boolean + /** Specify timeout for connection to master */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } } export interface IndicesExplainDataLifecycleResponse { @@ -12266,11 +20199,24 @@ export interface IndicesFieldUsageStatsInvertedIndex { } export interface IndicesFieldUsageStatsRequest extends RequestBase { + /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. 
+ * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } } export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody @@ -12292,30 +20238,60 @@ export interface IndicesFieldUsageStatsUsageStatsShards { } export interface IndicesFlushRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases to flush. + * Supports wildcards (`*`). + * To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the flush operation blocks until execution when another flush operation is running. + * If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ wait_if_ongoing?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } } export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. 
*/ expand_wildcards?: ExpandWildcards + /** Specify whether the index should be flushed after performing the operation (default: true) */ flush?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** The number of segments the index should be merged into (default: dynamic) */ max_num_segments?: long + /** Specify whether the operation should only expunge deleted documents */ only_expunge_deletes?: boolean + /** Should the request wait until the force merge is completed. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } } export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { + /** task contains a task id returned when wait_for_completion=false, + * you can use the task_id to get the status of the task at _tasks/ */ task?: string } @@ -12324,26 +20300,61 @@ export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] export interface IndicesGetRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard expressions (*) are supported. */ index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as open,hidden. */ expand_wildcards?: ExpandWildcards + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** If false, requests that target a missing index return an error. */ ignore_unavailable?: boolean + /** If true, return all default settings in the response. */ include_defaults?: boolean + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Return only information on specified index features */ features?: IndicesGetFeatures + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } } export type IndicesGetResponse = Record export interface IndicesGetAliasRequest extends RequestBase { + /** Comma-separated list of aliases to retrieve. + * Supports wildcards (`*`). + * To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never } } export type IndicesGetAliasResponse = Record @@ -12365,10 +20376,21 @@ export interface IndicesGetDataLifecycleDataStreamWithLifecycle { } export interface IndicesGetDataLifecycleRequest extends RequestBase { + /** Comma-separated list of data streams to limit the request. + * Supports wildcards (`*`). + * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never } } export interface IndicesGetDataLifecycleResponse { @@ -12376,27 +20398,50 @@ } export interface IndicesGetDataLifecycleStatsDataStreamStats { + /** The count of the backing indices for the data stream that have encountered an error. */ backing_indices_in_error: integer + /** The total count of the backing indices for the data stream. */ backing_indices_in_total: integer + /** The name of the data stream. */ name: DataStreamName } export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IndicesGetDataLifecycleStatsResponse { + /** The count of data streams currently being managed by the data stream lifecycle. */ data_stream_count: integer + /** Information about the data streams that are managed by the data stream lifecycle. */ data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + /** The duration of the last data stream lifecycle execution. */ last_run_duration_in_millis?: DurationValue + /** The time that passed between the start of the last two data stream lifecycle executions. + * This value should amount approximately to `data_streams.lifecycle.poll_interval`. */ time_between_starts_in_millis?: DurationValue } export interface IndicesGetDataStreamRequest extends RequestBase { + /** Comma-separated list of data stream names used to limit the request. + * Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ name?: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Whether the maximum timestamp for each data stream should be calculated and returned. */ verbose?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never } } export interface IndicesGetDataStreamResponse { @@ -12404,13 +20449,30 @@ }
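The verbose data stream listing pairs naturally with the lifecycle stats endpoint typed above: one names the streams, the other summarizes what the lifecycle service did with them. A sketch assuming a recent client that exposes both methods; the `logs-*` pattern is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List matching data streams; `verbose` adds maximum_timestamp per stream.
const { data_streams } = await client.indices.getDataStream({
  name: 'logs-*',
  verbose: true
})
for (const ds of data_streams) {
  console.log(ds.name)
}

// Cluster-wide summary of what the data stream lifecycle manages.
const stats = await client.indices.getDataLifecycleStats()
console.log(stats.data_stream_count)
```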
export interface IndicesGetFieldMappingRequest extends RequestBase { + /** Comma-separated list or wildcard expression of fields used to limit returned information. + * Supports wildcards (`*`). */ fields: Fields + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } } export type IndicesGetFieldMappingResponse = Record @@ -12425,11 +20487,20 @@ } export interface IndicesGetIndexTemplateIndexTemplateItem { } export interface IndicesGetIndexTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name?: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } } export interface IndicesGetIndexTemplateResponse { @@ -12442,18 +20513,39 @@ } export interface IndicesGetMappingIndexMappingRecord { } export interface IndicesGetMappingRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match.
+ * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } } export type IndicesGetMappingResponse = Record export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { + /** The index or data stream name. */ index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export interface IndicesGetMigrateReindexStatusResponse { @@ -12481,30 +20573,67 @@ export interface IndicesGetMigrateReindexStatusStatusInProgress { } export interface IndicesGetSettingsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** Comma-separated list or wildcard expression of settings to retrieve. */ name?: Names + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with foo but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. If + * `false`, information is retrieved from the master node. */ local?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } } export type IndicesGetSettingsResponse = Record export interface IndicesGetTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. + * To return all index templates, omit this parameter or use a value of `_all` or `*`. */ name?: Names + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `true`, the request retrieves information from the local node only. */ local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } } export type IndicesGetTemplateResponse = Record export interface IndicesMigrateReindexMigrateReindex { + /** Reindex mode. Currently only 'upgrade' is supported. */ mode: IndicesMigrateReindexModeEnum + /** The source index or data stream (only data streams are currently supported). */ source: IndicesMigrateReindexSourceIndex } @@ -12512,6 +20641,10 @@ export type IndicesMigrateReindexModeEnum = 'upgrade' export interface IndicesMigrateReindexRequest extends RequestBase { reindex?: IndicesMigrateReindexMigrateReindex + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { reindex?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { reindex?: never } } export type IndicesMigrateReindexResponse = AcknowledgedResponseBase @@ -12521,37 +20654,79 @@ export interface IndicesMigrateReindexSourceIndex { } export interface IndicesMigrateToDataStreamRequest extends RequestBase { + /** Name of the index alias to convert to a data stream. */ name: IndexName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase export interface IndicesModifyDataStreamAction { + /** Adds an existing index as a backing index for a data stream. + * The index is hidden as part of this operation. + * WARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior. + * This should be considered an expert level API. */ add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction + /** Removes a backing index from a data stream. + * The index is unhidden as part of this operation. + * A data stream’s write index cannot be removed. */ remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction } export interface IndicesModifyDataStreamIndexAndDataStreamAction { + /** Data stream targeted by the action. */ data_stream: DataStreamName + /** Index for the action. */ index: IndexName } export interface IndicesModifyDataStreamRequest extends RequestBase { + /** Actions to perform. */ actions: IndicesModifyDataStreamAction[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never } } export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase export interface IndicesOpenRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * By default, you must explicitly name the indices you are using to limit the request. + * To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. + * You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body.
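+ *
+ * A hypothetical usage sketch (the `client` instance and index name are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Reopen a closed index and wait until at least one shard copy per shard is active.
+ * await client.indices.open({ index: 'my-index', wait_for_active_shards: 1 })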
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } } export interface IndicesOpenResponse { @@ -12560,113 +20735,294 @@ export interface IndicesOpenResponse { } export interface IndicesPromoteDataStreamRequest extends RequestBase { + /** The name of the data stream */ name: IndexName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } } export type IndicesPromoteDataStreamResponse = any export interface IndicesPutAliasRequest extends RequestBase { + /** Comma-separated list of data streams or indices to add. + * Supports wildcards (`*`). + * Wildcard patterns that match both data streams and indices return an error. */ index: Indices + /** Alias to update. + * If the alias doesn’t exist, the request creates it. + * Index alias names support date math. */ name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing + /** If `true`, sets the write index or data stream for the alias. + * If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. + * If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. + * Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } + /** All values in `querystring` will be added to the request querystring. 
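+ *
+ * A hypothetical usage sketch (the `client` instance, index, and alias names are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Point a filtered alias at an existing index.
+ * await client.indices.putAlias({
+ *   index: 'my-index',
+ *   name: 'my-alias',
+ *   filter: { term: { 'user.id': 'kimchy' } }
+ * })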
*/ + querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } } export type IndicesPutAliasResponse = AcknowledgedResponseBase export interface IndicesPutDataLifecycleRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ name: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } } export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesPutIndexTemplateIndexTemplateMapping { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { + /** Index or template name */ name: Name + /** If `true`, this request cannot replace or update existing index templates. */ create?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** User defined reason for creating/updating the index template */ cause?: string + /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices + /** An ordered list of component template names.
+ * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. + * External systems can use these version numbers to simplify template management. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber + /** Optional user metadata about the index template. + * It may have any contents. + * It is not automatically generated or used by Elasticsearch. + * This user-defined object is stored in the cluster state, so keeping it short is preferable. + * To unset the metadata, replace the template without specifying it. */ _meta?: Metadata + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist. */ ignore_missing_component_templates?: string[] + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring.
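+ *
+ * A hypothetical usage sketch (the `client` instance and all names/values are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Create or update an index template that matches future `logs-*` indices.
+ * await client.indices.putIndexTemplate({
+ *   name: 'logs-template',
+ *   index_patterns: ['logs-*'],
+ *   priority: 100,
+ *   template: { settings: { number_of_shards: 1 } }
+ * })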
*/ + querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } } export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { + /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** If `true`, the mappings are applied only to the current write index for the target. */ write_index_only?: boolean + /** Controls whether dynamic date detection is enabled. */ date_detection?: boolean + /** Controls whether new fields are added dynamically. */ dynamic?: MappingDynamicMapping + /** If date detection is enabled then new string fields are checked + * against 'dynamic_date_formats' and if the value matches then + * a new date field is added instead of string. */ dynamic_date_formats?: string[] + /** Specify dynamic templates for the mapping. */ dynamic_templates?: Partial>[] + /** Control whether field names are enabled for the index. */ _field_names?: MappingFieldNamesField + /** A mapping type can have custom meta data associated with it. These are + * not used at all by Elasticsearch, but can be used to store + * application-specific metadata. */ _meta?: Metadata + /** Automatically map strings into numeric data types for all fields. */ numeric_detection?: boolean + /** Mapping for a field. For new fields, this mapping can include: + * + * - Field name + * - Field data type + * - Mapping parameters */ properties?: Record + /** Enable making a routing value required on indexed documents. */ _routing?: MappingRoutingField + /** Control whether the _source field is enabled on the index. */ _source?: MappingSourceField + /** Mapping of runtime fields for the index. */ runtime?: MappingRuntimeFields + /** All values in `body` will be added to the request body. 
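+ *
+ * A hypothetical usage sketch (the `client` instance, index, and field names are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Add a keyword field to an existing index mapping.
+ * await client.indices.putMapping({
+ *   index: 'my-index',
+ *   properties: { tags: { type: 'keyword' } }
+ * })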
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never } } export type IndicesPutMappingResponse = IndicesResponseBase export interface IndicesPutSettingsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values, such as + * `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration + /** If `true`, existing index settings remain unchanged. */ preserve_existing?: boolean + /** Whether to close and reopen the index to apply non-dynamic settings. + * If set to `true` the indices to which the settings are being applied + * will be closed temporarily and then reopened in order to apply the changes. */ reopen?: boolean + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring.
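+ *
+ * A hypothetical usage sketch (the `client` instance, index name, and setting value are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Increase the replica count for one index; this is a dynamic setting, so no reopen is needed.
+ * await client.indices.putSettings({ index: 'my-index', settings: { number_of_replicas: 2 } })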
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } } export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { + /** The name of the template */ name: Name + /** If true, this request cannot replace or update existing index templates. */ create?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** User defined reason for creating/updating the index template */ cause?: string + /** Aliases for the index. */ aliases?: Record + /** Array of wildcard expressions used to match the names + * of indices during creation. */ index_patterns?: string | string[] + /** Mapping for fields in the index. */ mappings?: MappingTypeMapping + /** Order in which Elasticsearch applies this template if index + * matches multiple templates. + * + * Templates with lower 'order' values are merged first. Templates with higher + * 'order' values are merged later, overriding templates with lower values. */ order?: integer + /** Configuration options for the index. */ settings?: IndicesIndexSettings + /** Version number used to manage index templates externally. This number + * is not automatically generated by Elasticsearch. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } } export type IndicesPutTemplateResponse = AcknowledgedResponseBase @@ -12736,12 +21092,27 @@ export interface IndicesRecoveryRecoveryStatus { } export interface IndicesRecoveryRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean + /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. 
*/ ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } } export type IndicesRecoveryResponse = Record @@ -12782,10 +21153,23 @@ export interface IndicesRecoveryVerifyIndex { } export interface IndicesRefreshRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } } export type IndicesRefreshResponse = ShardsOperationResponseBase @@ -12802,39 +21186,101 @@ export interface IndicesReloadSearchAnalyzersReloadResult { } export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { + /** A comma-separated list of index names to reload analyzers for */ index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Changed resource to reload analyzers from if applicable */ resource?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } + /** All values in `querystring` will be added to the request querystring. 
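+ *
+ * A hypothetical usage sketch (the `client` instance and index name are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Reload search analyzers, for example after updating a synonyms file.
+ * await client.indices.reloadSearchAnalyzers({ index: 'my-index' })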
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } } export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveClusterRequest extends RequestBase { + /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. + * Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. + * If no index expression is specified, information about all remote clusters configured on the local cluster + * is returned without doing any index matching. */ name?: Names + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing + * or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded, or aliased indices are ignored when frozen. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_throttled?: boolean + /** If false, the request returns an error if it targets a missing or closed index. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_unavailable?: boolean + /** The maximum time to wait for remote clusters to respond. + * If a remote cluster does not respond within this timeout period, the API response + * will show the cluster as not connected and include an error message that the + * request timed out. + * + * The default timeout is unset and the query can take + * as long as the networking layer is configured to wait for remote clusters that are + * not responding (typically 30 seconds). */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
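+ *
+ * A hypothetical usage sketch (the `client` instance and index expression are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Check which local and remote clusters can serve a cross-cluster search expression.
+ * const info = await client.indices.resolveCluster({ name: 'my-index*,cluster*:my-index*' })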
*/ + querystring?: { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } } export interface IndicesResolveClusterResolveClusterInfo { + /** Whether the remote cluster is connected to the local (querying) cluster. */ connected: boolean + /** The `skip_unavailable` setting for a remote cluster. */ skip_unavailable: boolean + /** Whether the index expression provided in the request matches any indices, aliases or data streams + * on the cluster. */ matching_indices?: boolean + /** Provides error messages that are likely to occur if you do a search with this index expression + * on the specified cluster (for example, lack of security privileges to query an index). */ error?: string + /** Provides version information about the cluster. */ version?: ElasticsearchVersionMinInfo } export type IndicesResolveClusterResponse = Record export interface IndicesResolveIndexRequest extends RequestBase { + /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. */ name: Names + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } } export interface IndicesResolveIndexResolveIndexAliasItem { @@ -12862,17 +21308,45 @@ export interface IndicesResolveIndexResponse { } export interface IndicesRolloverRequest extends RequestBase { + /** Name of the data stream or index alias to roll over. */ alias: IndexAlias + /** Name of the index to create. + * Supports date math. + * Data streams do not support this parameter. */ new_index?: IndexName + /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */ dry_run?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
*/ wait_for_active_shards?: WaitForActiveShards + /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. + * Only allowed on data streams. */ lazy?: boolean + /** Aliases for the target index. + * Data streams do not support this parameter. */ aliases?: Record + /** Conditions for the rollover. + * If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. + * If this parameter is not specified, Elasticsearch performs the rollover unconditionally. + * If conditions are specified, at least one of them must be a `max_*` condition. + * The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ conditions?: IndicesRolloverRolloverConditions + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. + * Data streams do not support this parameter. */ settings?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } } export interface IndicesRolloverResponse { @@ -12908,11 +21382,25 @@ export interface IndicesSegmentsIndexSegment { } export interface IndicesSegmentsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, the request returns a verbose response. */ verbose?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring.
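+ *
+ * A hypothetical usage sketch (the `client` instance and index name are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Inspect the low-level Lucene segments backing one index.
+ * const segments = await client.indices.segments({ index: 'my-index', verbose: true })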
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, verbose?: never } } export interface IndicesSegmentsResponse { @@ -12950,11 +21438,23 @@ export interface IndicesShardStoresIndicesShardStores { } export interface IndicesShardStoresRequest extends RequestBase { + /** List of data streams, indices, and aliases used to limit the request. */ index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all + * value targets only missing or closed indices. This behavior applies even if the request + * targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, + * this argument determines whether wildcard expressions match hidden data streams. */ expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** List of shard health statuses used to limit the request. */ status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never } } export interface IndicesShardStoresResponse { @@ -12992,13 +21492,28 @@ export interface IndicesShardStoresShardStoreWrapper { } export interface IndicesShrinkRequest extends RequestBase { + /** Name of the source index to shrink. */ index: IndexName + /** Name of the target index to create. */ target: IndexName + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** The key is the alias name. + * Index alias names support date math. */ aliases?: Record + /** Configuration options for the target index. */ settings?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. 
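+ *
+ * A hypothetical usage sketch (the `client` instance and index names are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Shrink a read-only source index into a new index with a single primary shard.
+ * await client.indices.shrink({
+ *   index: 'my-source-index',
+ *   target: 'my-shrunken-index',
+ *   settings: { 'index.number_of_shards': 1 }
+ * })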
*/ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } } export interface IndicesShrinkResponse { @@ -13008,11 +21523,20 @@ export interface IndicesShrinkResponse { } export interface IndicesSimulateIndexTemplateRequest extends RequestBase { + /** Name of the index to simulate */ name: Name + /** Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one */ create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ cause?: string + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } } export interface IndicesSimulateIndexTemplateResponse { @@ -13026,21 +21550,55 @@ export interface IndicesSimulateTemplateOverlapping { } export interface IndicesSimulateTemplateRequest extends RequestBase { + /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit + * this parameter and specify the template configuration in the request body. */ name?: Name + /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. */ create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ cause?: string + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean + /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. 
*/ template?: IndicesPutIndexTemplateIndexTemplateMapping + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber + /** Optional user metadata about the index template. + * May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist */ ignore_missing_component_templates?: string[] + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } } export interface IndicesSimulateTemplateResponse { @@ -13055,13 +21613,27 @@ export interface IndicesSimulateTemplateTemplate { } export interface IndicesSplitRequest extends RequestBase { + /** Name of the source index to split. */ index: IndexName + /** Name of the target index to create. */ target: IndexName + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards + /** Aliases for the resulting index. */ aliases?: Record + /** Configuration options for the target index. */ settings?: Record + /** All values in `body` will be added to the request body. 
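+ *
+ * A hypothetical usage sketch (the `client` instance and index names are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Split a source index into a new index with more primary shards.
+ * await client.indices.split({
+ *   index: 'my-source-index',
+ *   target: 'my-split-index',
+ *   settings: { 'index.number_of_shards': 4 }
+ * })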
*/ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } } export interface IndicesSplitResponse { @@ -13073,22 +21645,39 @@ export interface IndicesSplitResponse { export type IndicesStatsIndexMetadataState = 'open' | 'close' export interface IndicesStatsIndexStats { + /** Contains statistics about completions across all shards assigned to the node. */ completion?: CompletionStats + /** Contains statistics about documents across all primary shards assigned to the node. */ docs?: DocStats + /** Contains statistics about the field data cache across all shards assigned to the node. */ fielddata?: FielddataStats + /** Contains statistics about flush operations for the node. */ flush?: FlushStats + /** Contains statistics about get operations for the node. */ get?: GetStats + /** Contains statistics about indexing operations for the node. */ indexing?: IndexingStats + /** Contains statistics about indices operations for the node. */ indices?: IndicesStatsIndicesStats + /** Contains statistics about merge operations for the node. */ merges?: MergesStats + /** Contains statistics about the query cache across all shards assigned to the node. */ query_cache?: QueryCacheStats + /** Contains statistics about recovery operations for the node. */ recovery?: RecoveryStats + /** Contains statistics about refresh operations for the node. */ refresh?: RefreshStats + /** Contains statistics about the request cache across all shards assigned to the node. */ request_cache?: RequestCacheStats + /** Contains statistics about search operations for the node. */ search?: SearchStats + /** Contains statistics about segments across all shards assigned to the node. */ segments?: SegmentsStats + /** Contains statistics about the size of shards assigned to the node. */ store?: StoreStats + /** Contains statistics about transaction log operations for the node. */ translog?: TranslogStats + /** Contains statistics about index warming operations for the node. */ warmer?: WarmerStats bulk?: BulkStats shard_stats?: IndicesStatsShardsTotalStats @@ -13110,17 +21699,34 @@ export interface IndicesStatsMappingStats { } export interface IndicesStatsRequest extends RequestBase { + /** Limit the information returned to the specific metrics. */ metric?: Metrics + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices + /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields + /** If true, statistics are not collected from closed indices.
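+ *
+ * A hypothetical usage sketch for the surrounding request type (the `client` instance and index name are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Fetch document and store statistics for one index, broken down per shard.
+ * const stats = await client.indices.stats({ index: 'my-index', metric: 'docs,store', level: 'shards' })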
*/ forbid_closed_indices?: boolean + /** Comma-separated list of search groups to include in the search statistics. */ groups?: string | string[] + /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean + /** If true, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ level?: Level + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never } } export interface IndicesStatsResponse { @@ -13223,13 +21829,31 @@ export interface IndicesStatsShardsTotalStats { } export interface IndicesUnfreezeRequest extends RequestBase { + /** Identifier for the index. */ index: IndexName + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } } export interface IndicesUnfreezeResponse { @@ -13238,43 +21862,91 @@ export interface IndicesUnfreezeResponse { } export interface IndicesUpdateAliasesAction { + /** Adds a data stream or index to an alias. + * If the alias doesn’t exist, the `add` action creates it. */ add?: IndicesUpdateAliasesAddAction + /** Removes a data stream or index from an alias. */ remove?: IndicesUpdateAliasesRemoveAction + /** Deletes an index. + * You cannot use this action on aliases or data streams. */ remove_index?: IndicesUpdateAliasesRemoveIndexAction } export interface IndicesUpdateAliasesAddAction { + /** Alias for the action. + * Index alias names support date math. */ alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[] + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing + /** If `true`, the alias is hidden. */ is_hidden?: boolean + /** If `true`, sets the write index or data stream for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveAction { + /** Alias for the action. + * Index alias names support date math. */ alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[] + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveIndexAction { + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Actions to perform. */ actions?: IndicesUpdateAliasesAction[] + /** All values in `body` will be added to the request body. 
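+ *
+ * A hypothetical usage sketch (the `client` instance and index/alias names are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * // Atomically swap an alias from an old index to a new one.
+ * await client.indices.updateAliases({
+ *   actions: [
+ *     { remove: { index: 'my-index-000001', alias: 'my-alias' } },
+ *     { add: { index: 'my-index-000002', alias: 'my-alias' } }
+ *   ]
+ * })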
*/ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never } } export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase @@ -13287,20 +21959,45 @@ export interface IndicesValidateQueryIndicesValidationExplanation { } export interface IndicesValidateQueryRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases to search. + * Supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean + /** If `true`, the validation is executed on all shards instead of one random shard per index. */ all_shards?: boolean + /** Analyzer to use for the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean + /** The default operator for query string query: `AND` or `OR`. */ default_operator?: QueryDslOperator + /** Field to use as default where no field prefix is given in the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ df?: string + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If `true`, the response returns detailed information if an error has occurred. */ explain?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */ lenient?: boolean + /** If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. */ rewrite?: boolean + /** Query in the Lucene query string syntax. */ q?: string + /** The query definition using the Query DSL. */ query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never } } export interface IndicesValidateQueryResponse { @@ -13311,43 +22008,114 @@ export interface IndicesValidateQueryResponse { } export interface InferenceAdaptiveAllocations { + /** Turn on `adaptive_allocations`.
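+ *
+ * A minimal sketch of a value satisfying the surrounding interface (the numbers are illustrative assumptions, not part of this diff):
+ *
+ * @example
+ * const allocations: InferenceAdaptiveAllocations = {
+ *   enabled: true,
+ *   min_number_of_allocations: 1, // scale down no lower than one allocation
+ *   max_number_of_allocations: 8  // must be >= min_number_of_allocations
+ * }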
*/ enabled?: boolean + /** The maximum number of allocations to scale to. + * If set, it must be greater than or equal to `min_number_of_allocations`. */ max_number_of_allocations?: integer + /** The minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer } export interface InferenceAlibabaCloudServiceSettings { + /** A valid API key for the AlibabaCloud AI Search API. */ api_key: string + /** The name of the host address used for the inference task. + * You can find the host address in the API keys section of the documentation. */ host: string + /** This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. + * By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. */ rate_limit?: InferenceRateLimitSetting + /** The name of the model service to use for the inference task. + * The following service IDs are available for the `completion` task: + * + * * `ops-qwen-turbo` + * * `qwen-turbo` + * * `qwen-plus` + * * `qwen-max` + * * `qwen-max-longcontext` + * + * The following service ID is available for the `rerank` task: + * + * * `ops-bge-reranker-larger` + * + * The following service ID is available for the `sparse_embedding` task: + * + * * `ops-text-sparse-embedding-001` + * + * The following service IDs are available for the `text_embedding` task: + * + * * `ops-text-embedding-001` + * * `ops-text-embedding-zh-001` + * * `ops-text-embedding-en-001` + * * `ops-text-embedding-002` */ service_id: string + /** The name of the workspace used for the inference task. */ workspace: string } export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search' export interface InferenceAlibabaCloudTaskSettings { + /** For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model. + * Valid values are: + * + * * `ingest` for storing document embeddings in a vector database. + * * `search` for storing embeddings of search queries run against a vector database to find relevant documents. */ input_type?: string + /** For a `sparse_embedding` task, it affects whether the token name will be returned in the response. + * It defaults to `false`, which means only the token ID will be returned in the response. */ return_token?: boolean } export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'sparse_embedding' | 'text_embedding' export interface InferenceAmazonBedrockServiceSettings { + /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */ access_key: string + /** The base model ID or an ARN to a custom model based on a foundational model. + * The base model IDs can be found in the Amazon Bedrock documentation. + * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. */ model: string + /** The model provider for your deployment. + * Note that some providers may support only certain task types. 
+ * Supported providers include: + * + * * `amazontitan` - available for `text_embedding` and `completion` task types + * * `anthropic` - available for `completion` task type only + * * `ai21labs` - available for `completion` task type only + * * `cohere` - available for `text_embedding` and `completion` task types + * * `meta` - available for `completion` task type only + * * `mistral` - available for `completion` task type only */ provider?: string + /** The region that your model or ARN is deployed in. + * The list of available regions per model can be found in the Amazon Bedrock documentation. */ region: string + /** This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock. + * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 240. */ rate_limit?: InferenceRateLimitSetting + /** A valid AWS secret key that is paired with the `access_key`. + * For information about creating and managing access and secret keys, refer to the AWS documentation. */ secret_key: string } export type InferenceAmazonBedrockServiceType = 'amazonbedrock' export interface InferenceAmazonBedrockTaskSettings { + /** For a `completion` task, it sets the maximum number for the output tokens to be generated. */ max_new_tokens?: integer + /** For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results. + * At temperature 0.0 the model is most deterministic, at temperature 1.0 most random. + * It should not be used if `top_p` or `top_k` is specified. */ temperature?: float + /** For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability. + * It is only available for anthropic, cohere, and mistral providers. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ top_k?: float + /** For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens. + * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ top_p?: float } @@ -13356,77 +22124,175 @@ export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding' export type InferenceAmazonSageMakerApi = 'openai' | 'elastic' export interface InferenceAmazonSageMakerServiceSettings { + /** A valid AWS access key that has permissions to use Amazon SageMaker and access to models for invoking requests. */ access_key: string + /** The name of the SageMaker endpoint. */ endpoint_name: string + /** The API format to use when calling SageMaker. + * Elasticsearch will convert the POST _inference request to this data format when invoking the SageMaker endpoint. */ api: InferenceAmazonSageMakerApi + /** The region that your endpoint or Amazon Resource Name (ARN) is deployed in. + * The list of available regions per model can be found in the Amazon SageMaker documentation. */ region: string + /** A valid AWS secret key that is paired with the `access_key`. + * For information about creating and managing access and secret keys, refer to the AWS documentation. */ secret_key: string + /** The model ID when calling a multi-model endpoint. */ target_model?: string + /** The container to directly invoke when calling a multi-container endpoint. 
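A hedged sketch of creating an Amazon Bedrock completion endpoint with the settings above; the helper name `inference.putAmazonbedrock` is assumed from the client's camelCase naming convention, and all credentials and IDs are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.putAmazonbedrock({
  task_type: 'completion',
  amazonbedrock_inference_id: 'bedrock-completion',    // placeholder endpoint ID
  service: 'amazonbedrock',
  service_settings: {
    access_key: '<AWS_ACCESS_KEY>',                    // placeholder credential
    secret_key: '<AWS_SECRET_KEY>',                    // placeholder credential
    region: 'us-east-1',
    provider: 'anthropic',
    model: 'anthropic.claude-3-5-sonnet-20240620-v1:0' // placeholder base model ID
  },
  task_settings: {
    max_new_tokens: 256,
    temperature: 0.2 // per the docs above, do not combine with top_p or top_k
  }
})
```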
*/ target_container_hostname?: string + /** The inference component to directly invoke when calling a multi-component endpoint. */ inference_component_name?: string + /** The maximum number of inputs in each batch. This value is used by inference ingestion pipelines + * when processing semantic values. It correlates to the number of times the SageMaker endpoint is + * invoked (one per batch of input). */ batch_size?: integer + /** The number of dimensions returned by the text embedding models. If this value is not provided, then + * it is guessed by invoking the endpoint for the `text_embedding` task. */ dimensions?: integer } export type InferenceAmazonSageMakerServiceType = 'amazon_sagemaker' export interface InferenceAmazonSageMakerTaskSettings { + /** The AWS custom attributes passed verbatim through to the model running in the SageMaker Endpoint. + * Values will be returned in the `X-elastic-sagemaker-custom-attributes` header. */ custom_attributes?: string + /** The optional JMESPath expression used to override the EnableExplanations provided during endpoint creation. */ enable_explanations?: string + /** The capture data ID when enabled in the endpoint. */ inference_id?: string + /** The stateful session identifier for a new or existing session. + * New sessions will be returned in the `X-elastic-sagemaker-new-session-id` header. + * Closed sessions will be returned in the `X-elastic-sagemaker-closed-session-id` header. */ session_id?: string + /** Specifies the variant when running with multi-variant Endpoints. */ target_variant?: string } export interface InferenceAnthropicServiceSettings { + /** A valid API key for the Anthropic API. */ api_key: string + /** The name of the model to use for the inference task. + * Refer to the Anthropic documentation for the list of supported models. */ model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Anthropic. + * By default, the `anthropic` service sets the number of requests allowed per minute to 50. */ rate_limit?: InferenceRateLimitSetting } export type InferenceAnthropicServiceType = 'anthropic' export interface InferenceAnthropicTaskSettings { + /** For a `completion` task, it is the maximum number of tokens to generate before stopping. */ max_tokens: integer + /** For a `completion` task, it is the amount of randomness injected into the response. + * For more details about the supported range, refer to Anthropic documentation. */ temperature?: float + /** For a `completion` task, it specifies to only sample from the top K options for each subsequent token. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ top_k?: integer + /** For a `completion` task, it specifies to use Anthropic's nucleus sampling. + * In nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability. + * You should either alter `temperature` or `top_p`, but not both. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ top_p?: float } export type InferenceAnthropicTaskType = 'completion' export interface InferenceAzureAiStudioServiceSettings { + /** A valid API key of your Azure AI Studio model deployment. + * This key can be found on the overview page for your deployment in the management section of your Azure AI Studio account. 
+ * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`. + * The `token` endpoint type is for "pay as you go" endpoints that are billed per token. + * The `realtime` endpoint type is for "real-time" endpoints that are billed per hour of usage. */ endpoint_type: string + /** The target URL of your Azure AI Studio model deployment. + * This can be found on the overview page for your deployment in the management section of your Azure AI Studio account. */ target: string + /** The model provider for your deployment. + * Note that some providers may support only certain task types. + * Supported providers include: + * + * * `cohere` - available for `text_embedding` and `completion` task types + * * `databricks` - available for `completion` task type only + * * `meta` - available for `completion` task type only + * * `microsoft_phi` - available for `completion` task type only + * * `mistral` - available for `completion` task type only + * * `openai` - available for `text_embedding` and `completion` task types */ provider: string + /** This setting helps to minimize the number of rate limit errors returned from Azure AI Studio. + * By default, the `azureaistudio` service sets the number of requests allowed per minute to 240. */ rate_limit?: InferenceRateLimitSetting } export type InferenceAzureAiStudioServiceType = 'azureaistudio' export interface InferenceAzureAiStudioTaskSettings { + /** For a `completion` task, instruct the inference process to perform sampling. + * It has no effect unless `temperature` or `top_p` is specified. */ do_sample?: float + /** For a `completion` task, provide a hint for the maximum number of output tokens to be generated. */ max_new_tokens?: integer + /** For a `completion` task, control the apparent creativity of generated completions with a sampling temperature. + * It must be a number in the range of 0.0 to 2.0. + * It should not be used if `top_p` is specified. */ temperature?: float + /** For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability. + * It is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0. + * It should not be used if `temperature` is specified. */ top_p?: float + /** For a `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ user?: string } export type InferenceAzureAiStudioTaskType = 'completion' | 'text_embedding' export interface InferenceAzureOpenAIServiceSettings { + /** A valid API key for your Azure OpenAI account. + * You must specify either `api_key` or `entra_id`. + * If you do not provide either or you provide both, you will receive an error when you try to create your model. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. 
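A hedged sketch pairing these Azure AI Studio settings with the generic `inference.put` helper; the target URL and key are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'completion',
  inference_id: 'azure-ai-studio-completion',
  inference_config: {
    service: 'azureaistudio',
    service_settings: {
      api_key: '<AZURE_AI_STUDIO_KEY>',                       // placeholder credential
      target: 'https://example.eastus2.models.ai.azure.com/', // placeholder deployment target
      provider: 'openai',      // supports text_embedding and completion
      endpoint_type: 'token'   // "pay as you go", billed per token
    }
  }
})
```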
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key?: string + /** The Azure API version ID to use. + * It is recommended to use the latest supported non-preview version. */ api_version: string + /** The deployment name of your deployed models. + * Your Azure OpenAI deployments can be found through the Azure OpenAI Studio portal that is linked to your subscription. */ deployment_id: string + /** A valid Microsoft Entra token. + * You must specify either `api_key` or `entra_id`. + * If you do not provide either or you provide both, you will receive an error when you try to create your model. */ entra_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Azure. + * The `azureopenai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `1440`. + * For `completion`, it is set to `120`. */ rate_limit?: InferenceRateLimitSetting + /** The name of your Azure OpenAI resource. + * You can find this from the list of resources in the Azure Portal for your subscription. */ resource_name: string } export type InferenceAzureOpenAIServiceType = 'azureopenai' export interface InferenceAzureOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ user?: string } @@ -13437,10 +22303,35 @@ export type InferenceCohereEmbeddingType = 'binary' | 'bit' | 'byte' | 'float' | export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search' export interface InferenceCohereServiceSettings { + /** A valid API key for your Cohere account. + * You can find or create your Cohere API keys on the Cohere API key settings page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** For a `text_embedding` task, the types of embeddings you want to get back. + * Use `binary` for binary embeddings, which are encoded as bytes with signed int8 precision. + * Use `bit` for binary embeddings, which are encoded as bytes with signed int8 precision (this is a synonym of `binary`). + * Use `byte` for signed int8 embeddings (this is a synonym of `int8`). + * Use `float` for the default float embeddings. + * Use `int8` for signed int8 embeddings. */ embedding_type?: InferenceCohereEmbeddingType + /** For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task. + * + * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command). + * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). + * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). + * + * The default value for a text embedding task is `embed-english-v2.0`. */ model_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Cohere. + * By default, the `cohere` service sets the number of requests allowed per minute to 10000. 
*/ rate_limit?: InferenceRateLimitSetting + /** The similarity measure. + * If the `embedding_type` is `float`, the default value is `dot_product`. + * If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. */ similarity?: InferenceCohereSimilarityType } @@ -13449,9 +22340,28 @@ export type InferenceCohereServiceType = 'cohere' export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' export interface InferenceCohereTaskSettings { + /** For a `text_embedding` task, the type of input passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. + * + * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. */ input_type?: InferenceCohereInputType + /** For a `rerank` task, return doc text within the results. */ return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of the documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ top_n?: integer + /** For a `text_embedding` task, the method to handle inputs longer than the maximum token length. + * Valid values are: + * + * * `END`: When the input exceeds the maximum input token length, the end of the input is discarded. + * * `NONE`: When the input exceeds the maximum input token length, an error is returned. + * * `START`: When the input exceeds the maximum input token length, the start of the input is discarded. */ truncate?: InferenceCohereTruncateType } @@ -13468,62 +22378,282 @@ export interface InferenceCompletionResult { } export interface InferenceCompletionTool { + /** The type of tool. */ type: string + /** The function definition. */ function: InferenceCompletionToolFunction } export interface InferenceCompletionToolChoice { + /** The type of the tool. */ type: string + /** The tool choice function. */ function: InferenceCompletionToolChoiceFunction } export interface InferenceCompletionToolChoiceFunction { + /** The name of the function to call. */ name: string } export interface InferenceCompletionToolFunction { + /** A description of what the function does. + * This is used by the model to choose when and how to call the function. */ description?: string + /** The name of the function. */ name: string + /** The parameters the function accepts. This should be formatted as a JSON object. */ parameters?: any + /** Whether to enable schema adherence when generating the function call. */ strict?: boolean } export type InferenceCompletionToolType = string | InferenceCompletionToolChoice export interface InferenceContentObject { + /** The text content. */ text: string + /** The type of content. */ type: string } export interface InferenceCustomRequestParams { + /** The body structure of the request. It requires passing in the string-escaped result of the JSON format HTTP request body. + * For example: + * ``` + * "request": "{\"input\":${input}}" + * ``` + * > info + * > The content string needs to be a single line except when using the Kibana console. 
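A hedged sketch of a Cohere text-embedding endpoint using the `embedding_type`, `similarity`, and task settings documented above (the API key and endpoint ID are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'cohere-embeddings',
  inference_config: {
    service: 'cohere',
    service_settings: {
      api_key: '<COHERE_API_KEY>',  // placeholder credential
      model_id: 'embed-english-v3.0',
      embedding_type: 'byte',       // synonym of int8; defaults to cosine similarity
      similarity: 'cosine'
    },
    task_settings: {
      input_type: 'ingest', // required for v3 and higher embedding models
      truncate: 'END'       // discard the end of over-long inputs
    }
  }
})
```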
*/ content: string } export interface InferenceCustomResponseParams { + /** Specifies the JSON parser that is used to parse the response from the custom service. + * Different task types require different json_parser parameters. + * For example: + * ``` + * # text_embedding + * # For a response like this: + * + * { + * "object": "list", + * "data": [ + * { + * "object": "embedding", + * "index": 0, + * "embedding": [ + * 0.014539449, + * -0.015288644 + * ] + * } + * ], + * "model": "text-embedding-ada-002-v2", + * "usage": { + * "prompt_tokens": 8, + * "total_tokens": 8 + * } + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "text_embeddings":"$.data[*].embedding[*]" + * } + * } + * + * # sparse_embedding + * # For a response like this: + * + * { + * "request_id": "75C50B5B-E79E-4930-****-F48DBB392231", + * "latency": 22, + * "usage": { + * "token_count": 11 + * }, + * "result": { + * "sparse_embeddings": [ + * { + * "index": 0, + * "embedding": [ + * { + * "token_id": 6, + * "weight": 0.101 + * }, + * { + * "token_id": 163040, + * "weight": 0.28417 + * } + * ] + * } + * ] + * } + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "token_path":"$.result.sparse_embeddings[*].embedding[*].token_id", + * "weight_path":"$.result.sparse_embeddings[*].embedding[*].weight" + * } + * } + * + * # rerank + * # For a response like this: + * + * { + * "results": [ + * { + * "index": 3, + * "relevance_score": 0.999071, + * "document": "abc" + * }, + * { + * "index": 4, + * "relevance_score": 0.7867867, + * "document": "123" + * }, + * { + * "index": 0, + * "relevance_score": 0.32713068, + * "document": "super" + * } + * ] + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "reranked_index":"$.results[*].index", // optional + * "relevance_score":"$.results[*].relevance_score", + * "document_text":"$.results[*].document" // optional + * } + * } + * + * # completion + * # For a response like this: + * + * { + * "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT", + * "object": "chat.completion", + * "created": 1741569952, + * "model": "gpt-4.1-2025-04-14", + * "choices": [ + * { + * "index": 0, + * "message": { + * "role": "assistant", + * "content": "Hello! How can I assist you today?", + * "refusal": null, + * "annotations": [] + * }, + * "logprobs": null, + * "finish_reason": "stop" + * } + * ] + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "completion_result":"$.choices[*].message.content" + * } + * } */ json_parser: any } export interface InferenceCustomServiceSettings { + /** Specifies the HTTPS header parameters – such as `Authorization` or `Content-Type` – that are required to access the custom service. + * For example: + * ``` + * "headers":{ + * "Authorization": "Bearer ${api_key}", + * "Content-Type": "application/json;charset=utf-8" + * } + * ``` */ headers?: any + /** Specifies the input type translation values that are used to replace the `${input_type}` template in the request body. + * For example: + * ``` + * "input_type": { + * "translation": { + * "ingest": "do_ingest", + * "search": "do_search" + * }, + * "default": "a_default" + * }, + * ``` + * If the subsequent inference requests come from a search context, the `search` key will be used and the template will be replaced with `do_search`. + * If it comes from the ingest context `do_ingest` is used. 
If the request comes from a context that is not specified in `translation`, the default value will be used. If no default is specified, an empty string is used. + * `translation` can be: + * * `classification` + * * `clustering` + * * `ingest` + * * `search` */ input_type?: any + /** Specifies the query parameters as a list of tuples. The arrays inside the `query_parameters` must have two items, a key and a value. + * For example: + * ``` + * "query_parameters":[ + * ["param_key", "some_value"], + * ["param_key", "another_value"], + * ["other_key", "other_value"] + * ] + * ``` + * If the base url is `https://www.elastic.co` it results in: `https://www.elastic.co?param_key=some_value&param_key=another_value&other_key=other_value`. */ query_parameters?: any + /** The request configuration object. */ request: InferenceCustomRequestParams + /** The response configuration object. */ response: InferenceCustomResponseParams + /** Specifies secret parameters, like `api_key` or `api_token`, that are required to access the custom service. + * For example: + * ``` + * "secret_parameters":{ + * "api_key":"" + * } + * ``` */ secret_parameters: any + /** The URL endpoint to use for the requests. */ url?: string } export type InferenceCustomServiceType = 'custom' export interface InferenceCustomTaskSettings { + /** Specifies parameters that are required to run the custom service. The parameters depend on the model your custom service uses. + * For example: + * ``` + * "task_settings":{ + * "parameters":{ + * "input_type":"query", + * "return_token":true + * } + * } + * ``` */ parameters?: any } export type InferenceCustomTaskType = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion' export interface InferenceDeepSeekServiceSettings { + /** A valid API key for your DeepSeek account. + * You can find or create your DeepSeek API keys on the DeepSeek API key page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** For a `completion` or `chat_completion` task, the name of the model to use for the inference task. + * + * For the available `completion` and `chat_completion` models, refer to the [DeepSeek Models & Pricing docs](https://api-docs.deepseek.com/quick_start/pricing). */ model_id: string + /** The URL endpoint to use for the requests. Defaults to `https://api.deepseek.com/chat/completions`. */ url?: string } @@ -13538,24 +22668,58 @@ export type InferenceDenseByteVector = byte[] export type InferenceDenseVector = float[] export interface InferenceElasticsearchServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ adaptive_allocations?: InferenceAdaptiveAllocations + /** The deployment identifier for a trained model deployment. + * When `deployment_id` is used, the `model_id` is optional. 
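Putting the `request`, `response.json_parser`, `headers`, and `secret_parameters` pieces together, a hedged sketch of a custom text-embedding endpoint against a hypothetical OpenAI-compatible API (`api.example.com` and the key are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-custom-embeddings',
  inference_config: {
    service: 'custom',
    service_settings: {
      url: 'https://api.example.com/v1/embeddings', // placeholder endpoint
      secret_parameters: { api_key: '<API_KEY>' },  // placeholder secret
      headers: {
        Authorization: 'Bearer ${api_key}',         // template filled from secret_parameters
        'Content-Type': 'application/json;charset=utf-8'
      },
      // string-escaped request body; ${input} is replaced with the input strings
      request: '{"input": ${input}}',
      response: {
        json_parser: { text_embeddings: '$.data[*].embedding[*]' }
      }
    }
  }
})
```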
*/ deployment_id?: string + /** The name of the model to use for the inference task. + * It can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client. */ model_id: string + /** The total number of allocations that are assigned to the model across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations are enabled, do not set this value because it's automatically set. */ num_allocations?: integer + /** The number of threads used by each model allocation during inference. + * Increasing this value generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocation` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. */ num_threads: integer } export type InferenceElasticsearchServiceType = 'elasticsearch' export interface InferenceElasticsearchTaskSettings { + /** For a `rerank` task, return the document instead of only the index. */ return_documents?: boolean } export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding' export interface InferenceElserServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ adaptive_allocations?: InferenceAdaptiveAllocations + /** The total number of allocations this model is assigned across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations are enabled, do not set this value because it's automatically set. */ num_allocations: integer + /** The number of threads used by each model allocation during inference. + * Increasing this value generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocation` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. + * + * > info + * > If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1. */ num_threads: integer } @@ -13566,157 +22730,245 @@ export type InferenceElserTaskType = 'sparse_embedding' export type InferenceGoogleAiServiceType = 'googleaistudio' export interface InferenceGoogleAiStudioServiceSettings { + /** A valid API key of your Google Gemini account. */ api_key: string + /** The name of the model to use for the inference task. + * Refer to the Google documentation for the list of supported models. */ model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Google AI Studio. + * By default, the `googleaistudio` service sets the number of requests allowed per minute to 360. 
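As a sketch of the adaptive-allocations settings above, creating a sparse-embedding endpoint on the `elasticsearch` service; the built-in `.elser_model_2` ID is used as an example model:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'sparse_embedding',
  inference_id: 'my-elser-endpoint',
  inference_config: {
    service: 'elasticsearch',
    service_settings: {
      model_id: '.elser_model_2',
      num_threads: 1, // optimize for ingest, per the note above
      adaptive_allocations: {
        enabled: true, // num_allocations is omitted: it is managed automatically
        min_number_of_allocations: 1,
        max_number_of_allocations: 4
      }
    }
  }
})
```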
*/ rate_limit?: InferenceRateLimitSetting } export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding' export interface InferenceGoogleVertexAIServiceSettings { + /** The name of the location to use for the inference task. + * Refer to the Google documentation for the list of supported locations. */ location: string + /** The name of the model to use for the inference task. + * Refer to the Google documentation for the list of supported models. */ model_id: string + /** The name of the project to use for the inference task. */ project_id: string + /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI. + * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000. */ rate_limit?: InferenceRateLimitSetting + /** A valid service account in JSON format for the Google Vertex AI API. */ service_account_json: string } export type InferenceGoogleVertexAIServiceType = 'googlevertexai' export interface InferenceGoogleVertexAITaskSettings { + /** For a `text_embedding` task, truncate inputs longer than the maximum token length automatically. */ auto_truncate?: boolean + /** For a `rerank` task, the number of the top N documents that should be returned. */ top_n?: integer } export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion' export interface InferenceHuggingFaceServiceSettings { + /** A valid access token for your HuggingFace account. + * You can create or find your access tokens on the HuggingFace settings page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** This setting helps to minimize the number of rate limit errors returned from Hugging Face. + * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000 for all supported tasks. + * Hugging Face does not publish a universal rate limit — actual limits may vary. + * It is recommended to adjust this value based on the capacity and limits of your specific deployment environment. */ rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * For `completion` and `chat_completion` tasks, the deployed model must be compatible with the Hugging Face Chat Completion interface (see the linked external documentation for details). The endpoint URL for the request must include `/v1/chat/completions`. + * If the model supports the OpenAI Chat Completion schema, a toggle should appear in the interface. Enabling this toggle doesn't change any model behavior; it reveals the full endpoint URL needed (which should include `/v1/chat/completions`) when configuring the inference endpoint in Elasticsearch. If the model doesn't support this schema, the toggle may not be shown. */ url: string + /** The name of the HuggingFace model to use for the inference task. + * For `completion` and `chat_completion` tasks, this field is optional but may be required for certain models — particularly when using serverless inference endpoints. + * For the `text_embedding` task, this field should not be included. Otherwise, the request will fail. 
*/ model_id?: string } export type InferenceHuggingFaceServiceType = 'hugging_face' export interface InferenceHuggingFaceTaskSettings { + /** For a `rerank` task, return doc text within the results. */ return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of the documents. */ top_n?: integer } export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding' export interface InferenceInferenceChunkingSettings { + /** The maximum size of a chunk in words. + * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */ max_chunk_size?: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`. */ sentence_overlap?: integer + /** The chunking strategy: `sentence` or `word`. */ strategy?: string } export interface InferenceInferenceEndpoint { + /** Chunking configuration object */ chunking_settings?: InferenceInferenceChunkingSettings + /** The service type */ service: string + /** Settings specific to the service */ service_settings: InferenceServiceSettings + /** Task settings specific to the service and task type */ task_settings?: InferenceTaskSettings } export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskType } export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAlibabaCloudAI } export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAmazonBedrock } export interface InferenceInferenceEndpointInfoAmazonSageMaker extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAmazonSageMaker } export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAnthropic } export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAzureAIStudio } export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeAzureOpenAI } export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeCohere } export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeCustom } export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ 
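A short sketch of the chunking constraints above (`word` strategy, `overlap` at most half of `max_chunk_size`), attached to an E5 endpoint as an example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'e5-with-chunking',
  inference_config: {
    service: 'elasticsearch',
    service_settings: { model_id: '.multilingual-e5-small', num_threads: 1 },
    chunking_settings: {
      strategy: 'word',
      max_chunk_size: 120, // between 10 and 300 for the word strategy
      overlap: 40          // must not exceed half of max_chunk_size
    }
  }
})
```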
task_type: InferenceTaskTypeDeepSeek } export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeELSER } export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeElasticsearch } export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeGoogleAIStudio } export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeGoogleVertexAI } export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeHuggingFace } export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeJinaAi } export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeMistral } export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeOpenAI } export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeVoyageAI } export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint { + /** The inference Id */ inference_id: string + /** The task type */ task_type: InferenceTaskTypeWatsonx } @@ -13730,9 +22982,23 @@ export interface InferenceInferenceResult { } export interface InferenceJinaAIServiceSettings { + /** A valid API key of your JinaAI account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** The name of the model to use for the inference task. + * For a `rerank` task, it is required. + * For a `text_embedding` task, it is optional. */ model_id?: string + /** This setting helps to minimize the number of rate limit errors returned from JinaAI. + * By default, the `jinaai` service sets the number of requests allowed per minute to 2000 for all task types. */ rate_limit?: InferenceRateLimitSetting + /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm. + * The default value varies with the embedding type. + * For example, a float embedding type uses a `dot_product` similarity measure by default. 
*/ similarity?: InferenceJinaAISimilarityType } @@ -13741,8 +23007,19 @@ export type InferenceJinaAIServiceType = 'jinaai' export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm' export interface InferenceJinaAITaskSettings { + /** For a `rerank` task, return the doc text within the results. */ return_documents?: boolean + /** For a `text_embedding` task, the task passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. */ task?: InferenceJinaAITextEmbeddingTask + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of the documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ top_n?: integer } @@ -13751,18 +23028,68 @@ export type InferenceJinaAITaskType = 'rerank' | 'text_embedding' export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search' export interface InferenceMessage { + /** The content of the message. + * + * String example: + * ``` + * { + * "content": "Some string" + * } + * ``` + * + * Object example: + * ``` + * { + * "content": [ + * { + * "text": "Some text", + * "type": "text" + * } + * ] + * } + * ``` */ content?: InferenceMessageContent + /** The role of the message author. Valid values are `user`, `assistant`, `system`, and `tool`. */ role: string + /** Only for `tool` role messages. The tool call that this message is responding to. */ tool_call_id?: Id + /** Only for `assistant` role messages. The tool calls generated by the model. If it's specified, the `content` field is optional. + * Example: + * ``` + * { + * "tool_calls": [ + * { + * "id": "call_KcAjWtAww20AihPHphUh46Gd", + * "type": "function", + * "function": { + * "name": "get_current_weather", + * "arguments": "{\"location\":\"Boston, MA\"}" + * } + * } + * ] + * } + * ``` */ tool_calls?: InferenceToolCall[] } export type InferenceMessageContent = string | InferenceContentObject[] export interface InferenceMistralServiceSettings { + /** A valid API key of your Mistral account. + * You can find your Mistral API keys or you can create a new one on the API Keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** The maximum number of tokens per input before chunking occurs. */ max_input_tokens?: integer + /** The name of the model to use for the inference task. + * Refer to the Mistral models documentation for the list of available models. */ model: string + /** This setting helps to minimize the number of rate limit errors returned from the Mistral API. + * By default, the `mistral` service sets the number of requests allowed per minute to 240. 
*/ rate_limit?: InferenceRateLimitSetting } @@ -13771,17 +23098,39 @@ export type InferenceMistralServiceType = 'mistral' export type InferenceMistralTaskType = 'text_embedding' | 'completion' | 'chat_completion' export interface InferenceOpenAIServiceSettings { + /** A valid API key of your OpenAI account. + * You can find your OpenAI API keys in your OpenAI account under the API keys section. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** The number of dimensions the resulting output embeddings should have. + * It is supported only in `text-embedding-3` and later models. + * If it is not set, the OpenAI defined default for the model is used. */ dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the OpenAI documentation for the list of available text embedding models. */ model_id: string + /** The unique identifier for your organization. + * You can find the Organization ID in your OpenAI account under *Settings > Organizations*. */ organization_id?: string + /** This setting helps to minimize the number of rate limit errors returned from OpenAI. + * The `openai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `3000`. + * For `completion`, it is set to `500`. */ rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * It can be changed for testing purposes. */ url?: string } export type InferenceOpenAIServiceType = 'openai' export interface InferenceOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ user?: string } @@ -13794,17 +23143,65 @@ export interface InferenceRankedDocument { } export interface InferenceRateLimitSetting { + /** The number of requests allowed per minute. */ requests_per_minute?: integer } export interface InferenceRequestChatCompletion { + /** A list of objects representing the conversation. + * Requests should generally only add new messages from the user (role `user`). + * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ messages: InferenceMessage[] + /** The ID of the model to use. */ model?: string + /** The upper bound limit for the number of tokens that can be generated for a completion request. */ max_completion_tokens?: long + /** A sequence of strings to control when the model should stop generating additional tokens. */ stop?: string[] + /** The sampling temperature to use. */ temperature?: float + /** Controls which tool is called by the model. + * String representation: One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools. 
+ * Example (object representation): + * ``` + * { + * "tool_choice": { + * "type": "function", + * "function": { + * "name": "get_current_weather" + * } + * } + * } + * ``` */ tool_choice?: InferenceCompletionToolType + /** A list of tools that the model can call. + * Example: + * ``` + * { + * "tools": [ + * { + * "type": "function", + * "function": { + * "name": "get_price_of_item", + * "description": "Get the current price of an item", + * "parameters": { + * "type": "object", + * "properties": { + * "item": { + * "id": "12345" + * }, + * "unit": { + * "type": "currency" + * } + * } + * } + * } + * } + * ] + * } + * ``` */ tools?: InferenceCompletionTool[] + /** Nucleus sampling, an alternative to sampling with temperature. */ top_p?: float } @@ -13881,40 +23278,83 @@ export interface InferenceTextEmbeddingResult { } export interface InferenceToolCall { + /** The identifier of the tool call. */ id: Id + /** The function that the model called. */ function: InferenceToolCallFunction + /** The type of the tool call. */ type: string } export interface InferenceToolCallFunction { + /** The arguments to call the function with in JSON format. */ arguments: string + /** The name of the function to call. */ name: string } export interface InferenceVoyageAIServiceSettings { + /** The number of dimensions for resulting output embeddings. + * This setting maps to `output_dimension` in the VoyageAI documentation. + * Only for the `text_embedding` task type. */ dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the VoyageAI documentation for the list of available text embedding and rerank models. */ model_id: string + /** This setting helps to minimize the number of rate limit errors returned from VoyageAI. + * The `voyageai` service sets a default number of requests allowed per minute depending on the task type. + * For both `text_embedding` and `rerank`, it is set to `2000`. */ rate_limit?: InferenceRateLimitSetting + /** The data type for the embeddings to be returned. + * This setting maps to `output_dtype` in the VoyageAI documentation. + * Permitted values: float, int8, bit. + * `int8` is a synonym of `byte` in the VoyageAI documentation. + * `bit` is a synonym of `binary` in the VoyageAI documentation. + * Only for the `text_embedding` task type. */ embedding_type?: float } export type InferenceVoyageAIServiceType = 'voyageai' export interface InferenceVoyageAITaskSettings { + /** Type of the input text. + * Permitted values: `ingest` (maps to `document` in the VoyageAI documentation), `search` (maps to `query` in the VoyageAI documentation). + * Only for the `text_embedding` task type. */ input_type?: string + /** Whether to return the source documents in the response. + * Only for the `rerank` task type. */ return_documents?: boolean + /** The number of most relevant documents to return. + * If not specified, the reranking results of all documents will be returned. + * Only for the `rerank` task type. */ top_k?: integer + /** Whether to truncate the input texts to fit within the context length. */ truncation?: boolean } export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank' export interface InferenceWatsonxServiceSettings { + /** A valid API key of your Watsonx account. + * You can find your Watsonx API keys or you can create a new one on the API keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. 
+ * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ api_key: string + /** A version parameter that takes a version date in the format of `YYYY-MM-DD`. + * For the active version date parameters, refer to the Watsonx documentation. */ api_version: string + /** The name of the model to use for the inference task. + * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */ model_id: string + /** The identifier of the IBM Cloud project to use for the inference task. */ project_id: string + /** This setting helps to minimize the number of rate limit errors returned from Watsonx. + * By default, the `watsonxai` service sets the number of requests allowed per minute to 120. */ rate_limit?: InferenceRateLimitSetting + /** The URL of the inference endpoint that you created on Watsonx. */ url: string } @@ -13923,34 +23363,63 @@ export type InferenceWatsonxServiceType = 'watsonxai' export type InferenceWatsonxTaskType = 'text_embedding' export interface InferenceChatCompletionUnifiedRequest extends RequestBase { + /** The inference Id */ inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration chat_completion_request?: InferenceRequestChatCompletion + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } } export type InferenceChatCompletionUnifiedResponse = StreamResult export interface InferenceCompletionRequest extends RequestBase { + /** The inference Id */ inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ input: string | string[] + /** Optional task settings */ task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } export type InferenceCompletionResponse = InferenceCompletionInferenceResult export interface InferenceDeleteRequest extends RequestBase { + /** The task type */ task_type?: InferenceTaskType + /** The inference identifier. */ inference_id: Id + /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. */ dry_run?: boolean + /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. 
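Tying `messages`, `tools`, and `tool_choice` together, a hedged sketch of the unified chat completion call; the endpoint ID and tool are illustrative, and since the response is a server-sent event stream (`StreamResult`), consumption is omitted here:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Returns a StreamResult (server-sent events), not a parsed object.
const stream = await client.inference.chatCompletionUnified({
  inference_id: 'my-chat-endpoint', // placeholder endpoint ID
  chat_completion_request: {
    messages: [
      { role: 'user', content: 'What is the weather like in Boston today?' }
    ],
    tools: [{
      type: 'function',
      function: {
        name: 'get_current_weather', // hypothetical tool
        description: 'Get the current weather for a location',
        parameters: {
          type: 'object',
          properties: { location: { type: 'string' } },
          required: ['location']
        }
      }
    }],
    tool_choice: 'auto' // let the model decide between answering and calling the tool
  }
})
```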
*/ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } } export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult export interface InferenceGetRequest extends RequestBase { + /** The task type */ task_type?: InferenceTaskType + /** The inference Id */ inference_id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never } } export interface InferenceGetResponse { @@ -13958,357 +23427,720 @@ } export interface InferenceInferenceRequest extends RequestBase { + /** The type of inference task that the model performs. */ task_type?: InferenceTaskType + /** The unique identifier for the inference endpoint. */ inference_id: Id + /** The amount of time to wait for the inference request to complete. */ timeout?: Duration + /** The query input, which is required only for the `rerank` task. + * It is not required for other tasks. */ query?: string + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[] + /** Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include: + * * `SEARCH` + * * `INGEST` + * * `CLASSIFICATION` + * * `CLUSTERING` + * Not all services support all values. Unsupported values will trigger a validation exception. + * Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more info. + * + * > info + * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */ input_type?: string + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never } } export type InferenceInferenceResponse = InferenceInferenceResult export interface InferencePutRequest extends RequestBase { + /** The task type. Refer to the integration list in the API description for the available task types. */ task_type?: InferenceTaskType + /** The inference Id */ inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration inference_config?: InferenceInferenceEndpoint + /** All values in `body` will be added to the request body. 
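A sketch of the `rerank` path through this request type, where `query` is required and `input` carries the candidate documents; the endpoint ID is a placeholder, and the assumption is that rerank results come back on the `rerank` property of the result:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const resp = await client.inference.inference({
  task_type: 'rerank',
  inference_id: 'my-rerank-endpoint', // placeholder endpoint ID
  query: 'What is Elasticsearch?',    // required only for the rerank task
  input: [
    'Elasticsearch is a distributed search and analytics engine.',
    'Kibana is a data visualization and exploration UI.'
  ],
  task_settings: { top_n: 1, return_documents: true }
})
console.log(resp.rerank) // assumed: ranked documents with relevance scores
```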
 export interface InferencePutRequest extends RequestBase {
+  /** The task type. Refer to the integration list in the API description for the available task types. */
   task_type?: InferenceTaskType
+  /** The inference Id */
   inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
   inference_config?: InferenceInferenceEndpoint
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never }
 }

 export type InferencePutResponse = InferenceInferenceEndpointInfo

 export interface InferencePutAlibabacloudRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceAlibabaCloudTaskType
+  /** The unique identifier of the inference endpoint. */
   alibabacloud_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */
   service: InferenceAlibabaCloudServiceType
+  /** Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. */
   service_settings: InferenceAlibabaCloudServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceAlibabaCloudTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI
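// Usage sketch: creating an endpoint with the generic put request above, where
// `service` and `service_settings` travel inside `inference_config`. A sketch
// assuming the `elasticsearch` service; the endpoint id and model id are
// placeholder values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function putExample (): Promise<void> {
  const endpoint = await client.inference.put({
    task_type: 'text_embedding',
    inference_id: 'my-embedding-endpoint',
    inference_config: {
      service: 'elasticsearch',
      service_settings: {
        model_id: '.multilingual-e5-small',
        num_allocations: 1,
        num_threads: 1
      }
    }
  })
  console.log(endpoint)
}

putExample().catch(console.error)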
 export interface InferencePutAmazonbedrockRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceAmazonBedrockTaskType
+  /** The unique identifier of the inference endpoint. */
   amazonbedrock_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */
   service: InferenceAmazonBedrockServiceType
+  /** Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. */
   service_settings: InferenceAmazonBedrockServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceAmazonBedrockTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock

 export interface InferencePutAmazonsagemakerRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceTaskTypeAmazonSageMaker
+  /** The unique identifier of the inference endpoint. */
   amazonsagemaker_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `amazon_sagemaker`. */
   service: InferenceAmazonSageMakerServiceType
+  /** Settings used to install the inference model.
+   * These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. */
   service_settings: InferenceAmazonSageMakerServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type and `service_settings.api` you specified. */
   task_settings?: InferenceAmazonSageMakerTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, amazonsagemaker_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, amazonsagemaker_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAmazonsagemakerResponse = InferenceInferenceEndpointInfoAmazonSageMaker
 export interface InferencePutAnthropicRequest extends RequestBase {
+  /** The task type.
+   * The only valid task type for the model to perform is `completion`. */
   task_type: InferenceAnthropicTaskType
+  /** The unique identifier of the inference endpoint. */
   anthropic_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `anthropic`. */
   service: InferenceAnthropicServiceType
+  /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */
   service_settings: InferenceAnthropicServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceAnthropicTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic

 export interface InferencePutAzureaistudioRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceAzureAiStudioTaskType
+  /** The unique identifier of the inference endpoint. */
   azureaistudio_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `azureaistudio`. */
   service: InferenceAzureAiStudioServiceType
+  /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */
   service_settings: InferenceAzureAiStudioServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceAzureAiStudioTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio
 export interface InferencePutAzureopenaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform.
+   * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */
   task_type: InferenceAzureOpenAITaskType
+  /** The unique identifier of the inference endpoint. */
   azureopenai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `azureopenai`. */
   service: InferenceAzureOpenAIServiceType
+  /** Settings used to install the inference model. These settings are specific to the `azureopenai` service. */
   service_settings: InferenceAzureOpenAIServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceAzureOpenAITaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI

 export interface InferencePutCohereRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceCohereTaskType
+  /** The unique identifier of the inference endpoint. */
   cohere_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `cohere`. */
   service: InferenceCohereServiceType
+  /** Settings used to install the inference model.
+   * These settings are specific to the `cohere` service. */
   service_settings: InferenceCohereServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceCohereTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere

 export interface InferencePutCustomRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceCustomTaskType
+  /** The unique identifier of the inference endpoint. */
   custom_inference_id: Id
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `custom`. */
   service: InferenceCustomServiceType
+  /** Settings used to install the inference model.
+   * These settings are specific to the `custom` service. */
   service_settings: InferenceCustomServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceCustomTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutCustomResponse = InferenceInferenceEndpointInfoCustom
 export interface InferencePutDeepseekRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceTaskTypeDeepSeek
+  /** The unique identifier of the inference endpoint. */
   deepseek_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `deepseek`. */
   service: InferenceDeepSeekServiceType
+  /** Settings used to install the inference model.
+   * These settings are specific to the `deepseek` service. */
   service_settings: InferenceDeepSeekServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }

 export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek

 export interface InferencePutElasticsearchRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceElasticsearchTaskType
+  /** The unique identifier of the inference endpoint.
+   * It must not match the `model_id`. */
   elasticsearch_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `elasticsearch`. */
   service: InferenceElasticsearchServiceType
+  /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */
   service_settings: InferenceElasticsearchServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceElasticsearchTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
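// Usage sketch: the dedicated helper for the `elasticsearch` service. A sketch
// assuming an ELSER-style sparse embedding deployment; '.elser_model_2' and the
// endpoint id are placeholder values. Note the doc comment above: the endpoint
// id must not match the model_id.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function putElasticsearchExample (): Promise<void> {
  const endpoint = await client.inference.putElasticsearch({
    task_type: 'sparse_embedding',
    elasticsearch_inference_id: 'my-elser-endpoint',
    service: 'elasticsearch',
    service_settings: {
      model_id: '.elser_model_2',
      num_allocations: 1,
      num_threads: 1
    }
  })
  console.log(endpoint)
}

putElasticsearchExample().catch(console.error)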
 export interface InferencePutElserRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceElserTaskType
+  /** The unique identifier of the inference endpoint. */
   elser_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `elser`. */
   service: InferenceElserServiceType
+  /** Settings used to install the inference model. These settings are specific to the `elser` service. */
   service_settings: InferenceElserServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }

 export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER

 export interface InferencePutGoogleaistudioRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceGoogleAiStudioTaskType
+  /** The unique identifier of the inference endpoint. */
   googleaistudio_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `googleaistudio`. */
   service: InferenceGoogleAiServiceType
+  /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */
   service_settings: InferenceGoogleAiStudioServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }

 export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
 export interface InferencePutGooglevertexaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceGoogleVertexAITaskType
+  /** The unique identifier of the inference endpoint. */
   googlevertexai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `googlevertexai`. */
   service: InferenceGoogleVertexAIServiceType
+  /** Settings used to install the inference model. These settings are specific to the `googlevertexai` service. */
   service_settings: InferenceGoogleVertexAIServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceGoogleVertexAITaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI

 export interface InferencePutHuggingFaceRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceHuggingFaceTaskType
+  /** The unique identifier of the inference endpoint. */
   huggingface_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `hugging_face`. */
   service: InferenceHuggingFaceServiceType
+  /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */
   service_settings: InferenceHuggingFaceServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceHuggingFaceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
 export interface InferencePutJinaaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceJinaAITaskType
+  /** The unique identifier of the inference endpoint. */
   jinaai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `jinaai`. */
   service: InferenceJinaAIServiceType
+  /** Settings used to install the inference model. These settings are specific to the `jinaai` service. */
   service_settings: InferenceJinaAIServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceJinaAITaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi

 export interface InferencePutMistralRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceMistralTaskType
+  /** The unique identifier of the inference endpoint. */
   mistral_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `mistral`. */
   service: InferenceMistralServiceType
+  /** Settings used to install the inference model. These settings are specific to the `mistral` service. */
   service_settings: InferenceMistralServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }

 export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral

 export interface InferencePutOpenaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform.
+   * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */
   task_type: InferenceOpenAITaskType
+  /** The unique identifier of the inference endpoint. */
   openai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `openai`. */
   service: InferenceOpenAIServiceType
+  /** Settings used to install the inference model. These settings are specific to the `openai` service. */
   service_settings: InferenceOpenAIServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceOpenAITaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
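// Usage sketch: registering an OpenAI completion endpoint with the request
// shape above. Assumes the putOpenai helper generated from these types; the
// API key variable and model id are placeholder values. Per the task_type
// note, `chat_completion` would only be usable through the _stream API.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function putOpenaiExample (): Promise<void> {
  const endpoint = await client.inference.putOpenai({
    task_type: 'completion',
    openai_inference_id: 'my-openai-completion',
    service: 'openai',
    service_settings: {
      api_key: process.env.OPENAI_API_KEY ?? '',
      model_id: 'gpt-4o-mini'
    }
  })
  console.log(endpoint)
}

putOpenaiExample().catch(console.error)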
 export interface InferencePutVoyageaiRequest extends RequestBase {
+  /** The type of the inference task that the model will perform. */
   task_type: InferenceVoyageAITaskType
+  /** The unique identifier of the inference endpoint. */
   voyageai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
+  /** The type of service supported for the specified task type. In this case, `voyageai`. */
   service: InferenceVoyageAIServiceType
+  /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */
   service_settings: InferenceVoyageAIServiceSettings
+  /** Settings to configure the inference task.
+   * These settings are specific to the task type you specified. */
   task_settings?: InferenceVoyageAITaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }

 export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI

 export interface InferencePutWatsonxRequest extends RequestBase {
+  /** The task type.
+   * The only valid task type for the model to perform is `text_embedding`. */
   task_type: InferenceWatsonxTaskType
+  /** The unique identifier of the inference endpoint. */
   watsonx_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
   timeout?: Duration
+  /** The type of service supported for the specified task type. In this case, `watsonxai`. */
   service: InferenceWatsonxServiceType
+  /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */
   service_settings: InferenceWatsonxServiceSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never }
 }

 export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
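// Usage sketch: a Watsonx text embedding endpoint, tying together the
// InferenceWatsonxServiceSettings fields documented at the top of this hunk
// (`api_version` takes a YYYY-MM-DD version date). All credential, project,
// URL, and model values below are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function putWatsonxExample (): Promise<void> {
  const endpoint = await client.inference.putWatsonx({
    task_type: 'text_embedding',
    watsonx_inference_id: 'my-watsonx-embeddings',
    service: 'watsonxai',
    service_settings: {
      api_key: process.env.WATSONX_API_KEY ?? '',
      api_version: '2024-05-02',
      model_id: 'ibm/slate-30m-english-rtrvr',
      project_id: 'my-ibm-cloud-project',
      url: 'https://us-south.ml.cloud.ibm.com'
    }
  })
  console.log(endpoint)
}

putWatsonxExample().catch(console.error)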
 export interface InferenceRerankRequest extends RequestBase {
+  /** The unique identifier for the inference endpoint. */
   inference_id: Id
+  /** The amount of time to wait for the inference request to complete. */
   timeout?: Duration
+  /** Query input. */
   query: string
+  /** The text on which you want to perform the inference task.
+   * It can be a single string or an array.
+   *
+   * > info
+   * > Inference endpoints for the `completion` task type currently only support a single string as input. */
   input: string | string[]
+  /** Task settings for the individual inference request.
+   * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */
   task_settings?: InferenceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never }
 }

 export type InferenceRerankResponse = InferenceRerankedInferenceResult

 export interface InferenceSparseEmbeddingRequest extends RequestBase {
+  /** The inference Id */
   inference_id: Id
+  /** Specifies the amount of time to wait for the inference request to complete. */
   timeout?: Duration
+  /** Inference input.
+   * Either a string or an array of strings. */
   input: string | string[]
+  /** Optional task settings */
   task_settings?: InferenceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
 }

 export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult

 export interface InferenceStreamCompletionRequest extends RequestBase {
+  /** The unique identifier for the inference endpoint. */
   inference_id: Id
+  /** The amount of time to wait for the inference request to complete. */
   timeout?: Duration
+  /** The text on which you want to perform the inference task.
+   * It can be a single string or an array.
+   *
+   * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */
   input: string | string[]
+  /** Optional task settings */
   task_settings?: InferenceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
 }

 export type InferenceStreamCompletionResponse = StreamResult

 export interface InferenceTextEmbeddingRequest extends RequestBase {
+  /** The inference Id */
   inference_id: Id
+  /** Specifies the amount of time to wait for the inference request to complete. */
   timeout?: Duration
+  /** Inference input.
+   * Either a string or an array of strings. */
   input: string | string[]
+  /** Optional task settings */
   task_settings?: InferenceTaskSettings
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
 }

 export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult

 export interface InferenceUpdateRequest extends RequestBase {
+  /** The unique identifier of the inference endpoint. */
   inference_id: Id
+  /** The type of inference task that the model performs. */
   task_type?: InferenceTaskType
   inference_config?: InferenceInferenceEndpoint
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never }
 }

 export type InferenceUpdateResponse = InferenceInferenceEndpointInfo
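// Usage sketch: the rerank task, where `query` is required and `input` holds
// the candidate documents to reorder. Assumes an existing rerank endpoint;
// the endpoint id and inputs are placeholder values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function rerankExample (): Promise<void> {
  const result = await client.inference.rerank({
    inference_id: 'my-rerank-endpoint',
    query: 'What is the capital of Austria?',
    input: [
      'Vienna is the capital of Austria.',
      'Salzburg is known for Mozart.',
      'The Danube flows through Vienna.'
    ]
  })
  console.log(result)
}

rerankExample().catch(console.error)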
 export interface IngestAppendProcessor extends IngestProcessorBase {
+  /** The field to be appended to.
+   * Supports template snippets. */
   field: Field
+  /** The value to be appended. Supports template snippets. */
   value: any | any[]
+  /** If `false`, the processor does not append values already present in the field. */
   allow_duplicates?: boolean
 }

 export interface IngestAttachmentProcessor extends IngestProcessorBase {
+  /** The field to get the base64 encoded field from. */
   field: Field
+  /** If `true` and field does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The number of chars being used for extraction to prevent huge fields.
+   * Use `-1` for no limit. */
   indexed_chars?: long
+  /** Field name from which you can overwrite the number of chars being used for extraction. */
   indexed_chars_field?: Field
+  /** Array of properties to select to be stored.
+   * Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */
   properties?: string[]
+  /** The field that will hold the attachment information. */
   target_field?: Field
+  /** If true, the binary field will be removed from the document. */
   remove_binary?: boolean
+  /** Field containing the name of the resource to decode.
+   * If specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection. */
   resource_name?: string
 }

 export interface IngestBytesProcessor extends IngestProcessorBase {
+  /** The field to convert. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }

 export interface IngestCircleProcessor extends IngestProcessorBase {
+  /** The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */
   error_distance: double
+  /** The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */
   shape_type: IngestShapeType
+  /** The field to assign the polygon shape to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }

 export interface IngestCommunityIDProcessor extends IngestProcessorBase {
+  /** Field containing the source IP address. */
   source_ip?: Field
+  /** Field containing the source port. */
   source_port?: Field
+  /** Field containing the destination IP address. */
   destination_ip?: Field
+  /** Field containing the destination port. */
   destination_port?: Field
+  /** Field containing the IANA number. */
   iana_number?: Field
+  /** Field containing the ICMP type. */
   icmp_type?: Field
+  /** Field containing the ICMP code. */
   icmp_code?: Field
+  /** Field containing the transport protocol name or number. Used only when the
+   * iana_number field is not present. The following protocol names are currently
+   * supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */
   transport?: Field
+  /** Output field for the community ID. */
   target_field?: Field
+  /** Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The
+   * seed can prevent hash collisions between network domains, such as a staging
+   * and production network that use the same addressing scheme. */
   seed?: integer
+  /** If true and any required fields are missing, the processor quietly exits
+   * without modifying the document. */
   ignore_missing?: boolean
 }
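// Usage sketch: an ingest pipeline combining two of the processors typed above
// (`append` and `bytes`). The pipeline id and field names are placeholder
// values; client.ingest.putPipeline accepts these processor shapes directly.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function putPipelineExample (): Promise<void> {
  await client.ingest.putPipeline({
    id: 'my-enrichment-pipeline',
    processors: [
      // Appends a constant tag; scalars are promoted to arrays as documented.
      { append: { field: 'tags', value: ['ingested'] } },
      // Converts a human-readable size such as '1kb' into bytes (1024).
      { bytes: { field: 'file.size_human', target_field: 'file.size' } }
    ]
  })
}

putPipelineExample().catch(console.error)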
 export interface IngestConvertProcessor extends IngestProcessorBase {
+  /** The field whose value is to be converted. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
+  /** The type to convert the existing value to. */
   type: IngestConvertType
 }

 export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto'

 export interface IngestCsvProcessor extends IngestProcessorBase {
+  /** Value used to fill empty fields.
+   * Empty fields are skipped if this is not provided.
+   * An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */
   empty_value?: any
+  /** The field to extract data from. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Quote used in CSV, has to be single character string. */
   quote?: string
+  /** Separator used in CSV, has to be single character string. */
   separator?: string
+  /** The array of fields to assign extracted values to. */
   target_fields: Fields
+  /** Trim whitespaces in unquoted fields. */
   trim?: boolean
 }

 export interface IngestDatabaseConfiguration {
+  /** The provider-assigned name of the IP geolocation database to download. */
   name: Name
   maxmind?: IngestMaxmind
   ipinfo?: IngestIpinfo
@@ -14317,49 +24149,86 @@ export interface IngestDatabaseConfiguration {
 export interface IngestDatabaseConfigurationFull {
   web?: IngestWeb
   local?: IngestLocal
+  /** The provider-assigned name of the IP geolocation database to download. */
   name: Name
   maxmind?: IngestMaxmind
   ipinfo?: IngestIpinfo
 }

 export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
+  /** An array of the expected date formats for parsing dates / timestamps in the document being preprocessed.
+   * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */
   date_formats: string[]
+  /** How to round the date when formatting the date into the index name. Valid values are:
+   * `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).
+   * Supports template snippets. */
   date_rounding: string
+  /** The field to get the date or timestamp from. */
   field: Field
+  /** The format to be used when printing the parsed date into the index name.
+   * A valid java time pattern is expected here.
+   * Supports template snippets. */
   index_name_format?: string
+  /** A prefix of the index name to be prepended before the printed date.
+   * Supports template snippets. */
   index_name_prefix?: string
+  /** The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. */
   locale?: string
+  /** The timezone to use when parsing the date and when date math resolves expressions into concrete index names. */
   timezone?: string
 }
 export interface IngestDateProcessor extends IngestProcessorBase {
+  /** The field to get the date from. */
   field: Field
+  /** An array of the expected date formats.
+   * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */
   formats: string[]
+  /** The locale to use when parsing the date, relevant when parsing month names or week days.
+   * Supports template snippets. */
   locale?: string
+  /** The field that will hold the parsed date. */
   target_field?: Field
+  /** The timezone to use when parsing the date.
+   * Supports template snippets. */
   timezone?: string
+  /** The format to use when writing the date to target_field. Must be a valid
+   * java time pattern. */
   output_format?: string
 }

 export interface IngestDissectProcessor extends IngestProcessorBase {
+  /** The character(s) that separate the appended fields. */
   append_separator?: string
+  /** The field to dissect. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The pattern to apply to the field. */
   pattern: string
 }

 export interface IngestDocument {
+  /** Unique identifier for the document.
+   * This ID must be unique within the `_index`. */
   _id?: Id
+  /** Name of the index containing the document. */
   _index?: IndexName
+  /** JSON body for the document. */
   _source: any
 }

 export interface IngestDocumentSimulationKeys {
+  /** Unique identifier for the document. This ID must be unique within the `_index`. */
   _id: Id
+  /** Name of the index containing the document. */
   _index: IndexName
   _ingest: IngestIngest
+  /** Value used to send the document to a specific primary shard. */
   _routing?: string
+  /** JSON body for the document. */
   _source: Record
+  /** */
   _version?: SpecUtilsStringified
   _version_type?: VersionType
 }
@@ -14367,8 +24236,15 @@
 export type IngestDocumentSimulation = IngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType }

 export interface IngestDotExpanderProcessor extends IngestProcessorBase {
+  /** The field to expand into an object field.
+   * If set to `*`, all top-level fields will be expanded. */
   field: Field
+  /** Controls the behavior when there is already an existing nested object that conflicts with the expanded field.
+   * When `false`, the processor will merge conflicts by combining the old and the new values into an array.
+   * When `true`, the value from the expanded field will overwrite the existing value. */
   override?: boolean
+  /** The field that contains the field to expand.
+   * Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields. */
   path?: string
 }
@@ -14376,44 +24252,81 @@ export interface IngestDropProcessor extends IngestProcessorBase {
 }
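// Usage sketch: verifying the date and dissect processors above with the
// simulate API before installing a pipeline. The sample document and the
// dissect pattern are placeholder values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function simulateExample (): Promise<void> {
  const result = await client.ingest.simulate({
    pipeline: {
      processors: [
        // Splits 'level message' into two fields using a dissect pattern.
        { dissect: { field: 'raw', pattern: '%{log.level} %{message}' } },
        // Parses the timestamp field using the expected formats.
        { date: { field: 'ts', formats: ['ISO8601'] } }
      ]
    },
    docs: [
      { _source: { raw: 'INFO service started', ts: '2025-01-01T00:00:00Z' } }
    ]
  })
  console.log(JSON.stringify(result, null, 2))
}

simulateExample().catch(console.error)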
 export interface IngestEnrichProcessor extends IngestProcessorBase {
+  /** The field in the input document that matches the enrich policy’s match_field used to retrieve the enrichment data.
+   * Supports template snippets. */
   field: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The maximum number of matched documents to include under the configured target field.
+   * The `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object.
+   * In order to avoid documents getting too large, the maximum allowed value is 128. */
   max_matches?: integer
+  /** Whether the processor will update fields that already contain a non-null value.
+   * When set to `false`, such fields will not be touched. */
   override?: boolean
+  /** The name of the enrich policy to use. */
   policy_name: string
+  /** A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index.
+   * This option is only used for `geo_match` enrich policy types. */
   shape_relation?: GeoShapeRelation
+  /** Field added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy.
+   * Supports template snippets. */
   target_field: Field
 }

 export interface IngestFailProcessor extends IngestProcessorBase {
+  /** The error message thrown by the processor.
+   * Supports template snippets. */
   message: string
 }

 export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3'

 export interface IngestFingerprintProcessor extends IngestProcessorBase {
+  /** Array of fields to include in the fingerprint. For objects, the processor
+   * hashes both the field key and value. For other fields, the processor hashes
+   * only the field value. */
   fields: Fields
+  /** Output field for the fingerprint. */
   target_field?: Field
+  /** Salt value for the hash function. */
   salt?: string
+  /** The hash method used to compute the fingerprint. Must be one of MD5, SHA-1,
+   * SHA-256, SHA-512, or MurmurHash3. */
   method?: IngestFingerprintDigest
+  /** If true, the processor ignores any missing fields. If all fields are
+   * missing, the processor silently exits without modifying the document. */
   ignore_missing?: boolean
 }

 export interface IngestForeachProcessor extends IngestProcessorBase {
+  /** Field containing array or object values. */
   field: Field
+  /** If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */
   ignore_missing?: boolean
+  /** Ingest processor to run on each element. */
   processor: IngestProcessorContainer
 }

 export interface IngestGeoGridProcessor extends IngestProcessorBase {
+  /** The field to interpret as a geo-tile.
+   * The field format is determined by the `tile_type`. */
   field: string
+  /** Three tile formats are understood: geohash, geotile and geohex. */
   tile_type: IngestGeoGridTileType
+  /** The field to assign the polygon shape to, by default, the `field` is updated in-place. */
   target_field?: Field
+  /** If specified and a parent tile exists, save that tile address to this field. */
   parent_field?: Field
+  /** If specified and children tiles exist, save those tile addresses to this field as an array of strings. */
   children_field?: Field
+  /** If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */
   non_children_field?: Field
+  /** If specified, save the tile precision (zoom) as an integer to this field. */
   precision_field?: Field
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Which format to save the generated polygon in. */
   target_format?: IngestGeoGridTargetFormat
 }
@@ -14422,62 +24335,109 @@
 export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'

 export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'

 export interface IngestGeoIpProcessor extends IngestProcessorBase {
+  /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
   database_file?: string
+  /** The field to get the ip address from for the geographical lookup. */
   field: Field
+  /** If `true`, only the first found geoip data will be returned, even if the field contains an array. */
   first_only?: boolean
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Controls what properties are added to the `target_field` based on the geoip lookup. */
   properties?: string[]
+  /** The field that will hold the geographical information looked up from the MaxMind database. */
   target_field?: Field
+  /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.
+   * Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */
   download_database_on_pipeline_creation?: boolean
 }

 export interface IngestGrokProcessor extends IngestProcessorBase {
+  /** Must be disabled or v1. If v1, the processor uses patterns with Elastic
+   * Common Schema (ECS) field names. */
   ecs_compatibility?: string
+  /** The field to use for grok expression parsing. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor.
+   * Patterns matching existing names will override the pre-existing definition. */
   pattern_definitions?: Record
+  /** An ordered list of grok expression to match and extract named captures with.
+   * Returns on the first expression in the list that matches. */
   patterns: GrokPattern[]
+  /** When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */
   trace_match?: boolean
 }

 export interface IngestGsubProcessor extends IngestProcessorBase {
+  /** The field to apply the replacement to. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The pattern to be replaced. */
   pattern: string
+  /** The string to replace the matching patterns with. */
   replacement: string
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
 }

 export interface IngestHtmlStripProcessor extends IngestProcessorBase {
+  /** The string-valued field to remove HTML tags from. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the `field` is updated in-place. */
   target_field?: Field
 }

 export interface IngestInferenceConfig {
+  /** Regression configuration for inference. */
   regression?: IngestInferenceConfigRegression
+  /** Classification configuration for inference. */
   classification?: IngestInferenceConfigClassification
 }
 export interface IngestInferenceConfigClassification {
+  /** Specifies the number of top class predictions to return. */
   num_top_classes?: integer
+  /** Specifies the maximum number of feature importance values per document. */
   num_top_feature_importance_values?: integer
+  /** The field that is added to incoming documents to contain the inference prediction. */
   results_field?: Field
+  /** Specifies the field to which the top classes are written. */
   top_classes_results_field?: Field
+  /** Specifies the type of the predicted field to write.
+   * Valid values are: `string`, `number`, `boolean`. */
   prediction_field_type?: string
 }

 export interface IngestInferenceConfigRegression {
+  /** The field that is added to incoming documents to contain the inference prediction. */
   results_field?: Field
+  /** Specifies the maximum number of feature importance values per document. */
   num_top_feature_importance_values?: integer
 }

 export interface IngestInferenceProcessor extends IngestProcessorBase {
+  /** The ID or alias for the trained model, or the ID of the deployment. */
   model_id: Id
+  /** Field added to incoming documents to contain results objects. */
   target_field?: Field
+  /** Maps the document field names to the known field names of the model.
+   * This mapping takes precedence over any default mappings provided in the model configuration. */
   field_map?: Record
+  /** Contains the inference type and its options. */
   inference_config?: IngestInferenceConfig
+  /** Input fields for inference and output (destination) fields for the inference results.
+   * This option is incompatible with the target_field and field_map options. */
   input_output?: IngestInputConfig | IngestInputConfig[]
+  /** If true and any of the input fields defined in input_output are missing
+   * then those missing fields are quietly ignored, otherwise a missing field causes a failure.
+   * Only applies when using input_output configurations to explicitly list the input fields. */
   ignore_missing?: boolean
 }
@@ -14493,12 +24453,20 @@ export interface IngestInputConfig {
 }

 export interface IngestIpLocationProcessor extends IngestProcessorBase {
+  /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */
   database_file?: string
+  /** The field to get the ip address from for the geographical lookup. */
   field: Field
+  /** If `true`, only the first found IP location data will be returned, even if the field contains an array. */
   first_only?: boolean
+  /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** Controls what properties are added to the `target_field` based on the IP location lookup. */
   properties?: string[]
+  /** The field that will hold the geographical information looked up from the MaxMind database. */
   target_field?: Field
+  /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.
+   * Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */
   download_database_on_pipeline_creation?: boolean
 }
@@ -14506,32 +24474,61 @@ export interface IngestIpinfo {
 }
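// Usage sketch: wiring the inference processor above into a pipeline using the
// input_output style of configuration (incompatible with target_field and
// field_map, per the docs). The model id and field names are placeholders, and
// the input_field/output_field keys are assumed from IngestInputConfig.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: 'REDACTED' } })

async function inferencePipelineExample (): Promise<void> {
  await client.ingest.putPipeline({
    id: 'my-embedding-pipeline',
    processors: [
      {
        inference: {
          model_id: 'my-embedding-endpoint',
          input_output: [
            { input_field: 'body_text', output_field: 'body_embedding' }
          ],
          // Missing input fields are quietly ignored instead of failing.
          ignore_missing: true
        }
      }
    ]
  })
}

inferencePipelineExample().catch(console.error)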
 export interface IngestJoinProcessor extends IngestProcessorBase {
+  /** Field containing array values to join. */
   field: Field
+  /** The separator character. */
   separator: string
+  /** The field to assign the joined value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }

 export interface IngestJsonProcessor extends IngestProcessorBase {
+  /** Flag that forces the parsed JSON to be added at the top level of the document.
+   * `target_field` must not be set when this option is chosen. */
   add_to_root?: boolean
+  /** When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden.
+   * When set to `merge`, conflicting fields will be merged.
+   * Only applicable if `add_to_root` is set to true. */
   add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy
+  /** When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.
+   * Instead, the last encountered value for any duplicate key wins. */
   allow_duplicate_keys?: boolean
+  /** The field to be parsed. */
   field: Field
+  /** The field that the converted structured object will be written into.
+   * Any existing content in this field will be overwritten. */
   target_field?: Field
 }

 export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge'

 export interface IngestKeyValueProcessor extends IngestProcessorBase {
+  /** List of keys to exclude from document. */
   exclude_keys?: string[]
+  /** The field to be parsed.
+   * Supports template snippets. */
   field: Field
+  /** Regex pattern to use for splitting key-value pairs. */
   field_split: string
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** List of keys to filter and insert into document.
+   * Defaults to including all keys. */
   include_keys?: string[]
+  /** Prefix to be added to extracted keys. */
   prefix?: string
+  /** If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */
   strip_brackets?: boolean
+  /** The field to insert the extracted keys into.
+   * Defaults to the root of the document.
+   * Supports template snippets. */
   target_field?: Field
+  /** String of characters to trim from extracted keys. */
   trim_key?: string
+  /** String of characters to trim from extracted values. */
   trim_value?: string
+  /** Regex pattern to use for splitting the key from the value within a key-value pair. */
   value_split: string
 }
@@ -14540,8 +24537,12 @@ export interface IngestLocal {
 }

 export interface IngestLowercaseProcessor extends IngestProcessorBase {
+  /** The field to make lowercase. */
   field: Field
+  /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */
   ignore_missing?: boolean
+  /** The field to assign the converted value to.
+   * By default, the field is updated in-place. */
   target_field?: Field
 }
@@ -14550,31 +24551,57 @@ export interface IngestMaxmind {
 }

 export interface IngestNetworkDirectionProcessor extends IngestProcessorBase {
+  /** Field containing the source IP address. */
   source_ip?: Field
+  /** Field containing the destination IP address. */
   destination_ip?: Field
+  /** Output field for the network direction. */
   target_field?: Field
+  /** List of internal networks. Supports IPv4 and IPv6 addresses and ranges in
+   * CIDR notation. Also supports the named ranges listed below. These may be
+   * constructed with template snippets. Must specify only one of
+   * internal_networks or internal_networks_field. */
   internal_networks?: string[]
+  /** A field on the given document to read the internal_networks configuration
+   * from. */
   internal_networks_field?: Field
+  /** If true and any required fields are missing, the processor quietly exits
+   * without modifying the document. */
   ignore_missing?: boolean
 }
*/ internal_networks_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestPipeline { + /** Description of the ingest pipeline. */ description?: string + /** Processors to run immediately after a processor failure. */ on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata } export interface IngestPipelineConfig { + /** Description of the ingest pipeline. */ description?: string + /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ processors: IngestProcessorContainer[] } export interface IngestPipelineProcessor extends IngestProcessorBase { + /** The name of the pipeline to execute. + * Supports template snippets. */ name: Name + /** Whether to ignore missing pipelines instead of failing. */ ignore_missing_pipeline?: boolean } @@ -14591,118 +24618,275 @@ export interface IngestPipelineProcessorResult { export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped' export interface IngestProcessorBase { + /** Description of the processor. + * Useful for describing the purpose of the processor or its configuration. */ description?: string + /** Conditionally execute the processor. */ if?: string + /** Ignore failures for the processor. */ ignore_failure?: boolean + /** Handle failures for the processor. */ on_failure?: IngestProcessorContainer[] + /** Identifier for the processor. + * Useful for debugging and metrics. */ tag?: string } export interface IngestProcessorContainer { + /** Appends one or more values to an existing array if the field already exists and it is an array. + * Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. + * Creates an array containing the provided values if the field doesn’t exist. + * Accepts a single value or an array of values. */ append?: IngestAppendProcessor + /** The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ attachment?: IngestAttachmentProcessor + /** Converts a human readable byte value (for example `1kb`) to its value in bytes (for example `1024`). + * If the field is an array of strings, all members of the array will be converted. + * Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. + * An error will occur if the field is not a supported format or resultant value exceeds 2^63. */ bytes?: IngestBytesProcessor + /** Converts circle definitions of shapes to regular polygons which approximate them. 
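+ * @example
+ * // A hedged sketch of a `circle` processor entry; `error_distance` and
+ * // `shape_type` follow the Elasticsearch docs but are not defined in this
+ * // file, so treat them as illustrative:
+ * // { circle: { field: 'circle_shape', error_distance: 28.0, shape_type: 'geo_shape' } }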
*/ circle?: IngestCircleProcessor + /** Computes the Community ID for network flow data as defined in the + * Community ID Specification. You can use a community ID to correlate network + * events related to a single flow. */ community_id?: IngestCommunityIDProcessor + /** Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + * If the field value is an array, all members will be converted. */ convert?: IngestConvertProcessor + /** Extracts fields from a CSV line in a single text field within a document. + * Any empty field in the CSV will be skipped. */ csv?: IngestCsvProcessor + /** Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ date?: IngestDateProcessor + /** The purpose of this processor is to point documents to the right time-based index, based on a date or timestamp field in a document, by using the date math index name support. */ date_index_name?: IngestDateIndexNameProcessor + /** Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. */ dissect?: IngestDissectProcessor + /** Expands a field with dots into an object field. + * This processor allows fields with dots in the name to be accessible by other processors in the pipeline. + * Otherwise these fields can’t be accessed by any processor. */ dot_expander?: IngestDotExpanderProcessor + /** Drops the document without raising any errors. + * This is useful to prevent the document from getting indexed based on some condition. */ drop?: IngestDropProcessor + /** The `enrich` processor can enrich documents with data from another index. */ enrich?: IngestEnrichProcessor + /** Raises an exception. + * This is useful when you expect a pipeline to fail and want to relay a specific message to the requester. */ fail?: IngestFailProcessor + /** Computes a hash of the document’s content. You can use this hash for + * content fingerprinting. */ fingerprint?: IngestFingerprintProcessor + /** Runs an ingest processor on each element of an array or object. */ foreach?: IngestForeachProcessor + /** Currently an undocumented alias for the GeoIP processor. */ ip_location?: IngestIpLocationProcessor + /** Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. + * This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ geo_grid?: IngestGeoGridProcessor + /** The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ geoip?: IngestGeoIpProcessor + /** Extracts structured fields out of a single text field within a document. + * You choose which field to extract matched fields from, as well as the grok pattern you expect will match. + * A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ grok?: IngestGrokProcessor + /** Converts a string field by applying a regular expression and a replacement. + * If the field is an array of strings, all members of the array will be converted. + * If any non-string values are encountered, the processor will throw an exception. */ gsub?: IngestGsubProcessor + /** Removes HTML tags from the field. + * If the field is an array of strings, HTML tags will be removed from all members of the array.
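+ * @example
+ * // A minimal sketch (illustrative field names): strip markup from
+ * // `description` into a separate field, leaving the original intact.
+ * // { html_strip: { field: 'description', target_field: 'description_text' } }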
*/ html_strip?: IngestHtmlStripProcessor + /** Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ inference?: IngestInferenceProcessor + /** Joins each element of an array into a single string using a separator character between each element. + * Throws an error when the field is not an array. */ join?: IngestJoinProcessor + /** Converts a JSON string into a structured JSON object. */ json?: IngestJsonProcessor + /** This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ kv?: IngestKeyValueProcessor + /** Converts a string to its lowercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ lowercase?: IngestLowercaseProcessor + /** Calculates the network direction given a source IP address, destination IP + * address, and a list of internal networks. */ network_direction?: IngestNetworkDirectionProcessor + /** Executes another pipeline. */ pipeline?: IngestPipelineProcessor + /** The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. + * The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses. + * Text that matches a Grok pattern is replaced with a configurable string such as `<EMAIL>` where an email address is matched, or all matches can simply be replaced with the text `<REDACTED>` if preferred. */ redact?: IngestRedactProcessor + /** Extracts the registered domain (also known as the effective top-level + * domain or eTLD), sub-domain, and top-level domain from a fully qualified + * domain name (FQDN). Uses the registered domains defined in the Mozilla + * Public Suffix List. */ registered_domain?: IngestRegisteredDomainProcessor + /** Removes existing fields. + * If one field doesn’t exist, an exception will be thrown. */ remove?: IngestRemoveProcessor + /** Renames an existing field. + * If the field doesn’t exist or the new name is already used, an exception will be thrown. */ rename?: IngestRenameProcessor + /** Routes a document to another target index or data stream. + * When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set. + * When the `destination` option is not set, this processor is in a data stream mode. Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */ reroute?: IngestRerouteProcessor + /** Runs an inline or stored script on incoming documents. + * The script runs in the `ingest` context. */ script?: IngestScriptProcessor + /** Adds a field with the specified value. + * If the field already exists, its value will be replaced with the provided one. */ set?: IngestSetProcessor + /** Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */ set_security_user?: IngestSetSecurityUserProcessor + /** Sorts the elements of an array ascending or descending. + * Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. + * Throws an error when the field is not an array.
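+ * @example
+ * // A minimal sketch (illustrative field name): sort the `scores` array
+ * // in descending order, updating the field in place.
+ * // { sort: { field: 'scores', order: 'desc' } }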
*/ sort?: IngestSortProcessor + /** Splits a field into an array using a separator character. + * Only works on string fields. */ split?: IngestSplitProcessor + /** Terminates the current ingest pipeline, causing no further processors to be run. + * This will normally be executed conditionally, using the `if` option. */ terminate?: IngestTerminateProcessor + /** Trims whitespace from a field. + * If the field is an array of strings, all members of the array will be trimmed. + * This only works on leading and trailing whitespace. */ trim?: IngestTrimProcessor + /** Converts a string to its uppercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ uppercase?: IngestUppercaseProcessor + /** URL-decodes a string. + * If the field is an array of strings, all members of the array will be decoded. */ urldecode?: IngestUrlDecodeProcessor + /** Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. + * This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */ uri_parts?: IngestUriPartsProcessor + /** The `user_agent` processor extracts details from the user agent string a browser sends with its web requests. + * This processor adds this information by default under the `user_agent` field. */ user_agent?: IngestUserAgentProcessor } export interface IngestRedact { + /** Indicates whether the document has been redacted. */ _is_redacted: boolean } export interface IngestRedactProcessor extends IngestProcessorBase { + /** The field to be redacted. */ field: Field + /** A list of grok expressions to match and redact named captures with. */ patterns: GrokPattern[] pattern_definitions?: Record + /** Start a redacted section with this token. */ prefix?: string + /** End a redacted section with this token. */ suffix?: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true` and the current license does not support running redact processors, the processor quietly exits without modifying the document. */ skip_if_unlicensed?: boolean + /** If `true`, the ingest metadata field `_ingest._redact._is_redacted` is set to `true` if the document has been redacted. */ trace_redact?: boolean } export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + /** Field containing the source FQDN. */ field: Field + /** Object field containing extracted domain components. If an empty string, + * the processor adds components to the document’s root. */ target_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestRemoveProcessor extends IngestProcessorBase { + /** Fields to be removed. Supports template snippets. */ field: Fields + /** Fields to be kept. When set, all fields other than those specified are removed. */ keep?: Fields + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean } export interface IngestRenameProcessor extends IngestProcessorBase { + /** The field to be renamed. + * Supports template snippets. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The new name of the field. + * Supports template snippets.
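+ * @example
+ * // A minimal sketch (illustrative field names): move `hostname` to the
+ * // ECS-style `host.name`, skipping documents that lack the field.
+ * // { rename: { field: 'hostname', target_field: 'host.name', ignore_missing: true } }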
*/ target_field: Field } export interface IngestRerouteProcessor extends IngestProcessorBase { + /** A static value for the target. Can’t be set when the dataset or namespace option is set. */ destination?: string + /** Field references or a static value for the dataset part of the data stream name. + * In addition to the criteria for index names, cannot contain `-` and must be no longer than 100 characters. + * Example values are nginx.access and nginx.error. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the `<dataset>` part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.dataset}} */ dataset?: string | string[] + /** Field references or a static value for the namespace part of the data stream name. See the criteria for + * index names for allowed characters. Must be no longer than 100 characters. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the `<namespace>` part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.namespace}} */ namespace?: string | string[] } export interface IngestScriptProcessor extends IngestProcessorBase { + /** ID of a stored script. + * If no `source` is specified, this parameter is required. */ id?: Id + /** Script language. */ lang?: string + /** Object containing parameters for the script. */ params?: Record + /** Inline script. + * If no `id` is specified, this parameter is required. */ source?: string } export interface IngestSetProcessor extends IngestProcessorBase { + /** The origin field which will be copied to `field`. You cannot set the `value` option at the same time. + * Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ copy_from?: Field + /** The field to insert, upsert, or update. + * Supports template snippets. */ field: Field + /** If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ ignore_empty_value?: boolean + /** The media type for encoding `value`. + * Applies only when `value` is a template snippet. + * Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ media_type?: string + /** If `true`, the processor updates fields that already contain a non-null value. + * When set to `false`, such fields will not be touched. */ override?: boolean + /** The value to be set for the field. + * Supports template snippets. + * May specify only one of `value` or `copy_from`. */ value?: any } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + /** The field to store the user information into. */ field: Field + /** Controls what user-related properties are added to the field. */ properties?: string[] } @@ -14715,16 +24899,27 @@ export interface IngestSimulateDocumentResult { } export interface IngestSortProcessor extends IngestProcessorBase { + /** The field to be sorted. */ field: Field + /** The sort order to use. + * Accepts `"asc"` or `"desc"`. */ order?: SortOrder + /** The field to assign the sorted value to. + * By default, the field is updated in-place.
*/ target_field?: Field } export interface IngestSplitProcessor extends IngestProcessorBase { + /** The field to split. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Preserves empty trailing fields, if any. */ preserve_trailing?: boolean + /** A regex which matches the separator, for example, `,` or `\s+`. */ separator: string + /** The field to assign the split value to. + * By default, the field is updated in-place. */ target_field?: Field } @@ -14732,37 +24927,62 @@ export interface IngestTerminateProcessor extends IngestProcessorBase { } export interface IngestTrimProcessor extends IngestProcessorBase { + /** The string-valued field to trim whitespace from. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the trimmed value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUppercaseProcessor extends IngestProcessorBase { + /** The field to make uppercase. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUriPartsProcessor extends IngestProcessorBase { + /** Field containing the URI string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true`, the processor copies the unparsed URI to `<target_field>.original`. */ keep_original?: boolean + /** If `true`, the processor removes the `field` after parsing the URI string. + * If parsing fails, the processor does not remove the `field`. */ remove_if_successful?: boolean + /** Output field for the URI object. */ target_field?: Field } export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + /** The field to decode. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUserAgentProcessor extends IngestProcessorBase { + /** The field containing the user agent string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core that it ships with. */ regex_file?: string + /** The field that will be filled with the user agent details. */ target_field?: Field + /** Controls what properties are added to `target_field`. */ properties?: IngestUserAgentProperty[] + /** Extracts device type from the user agent string on a best-effort basis.
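+ * @example
+ * // A minimal sketch (illustrative field name): parse the `agent` field and
+ * // additionally attempt device-type extraction.
+ * // { user_agent: { field: 'agent', extract_device_type: true } }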
+ * @beta */ extract_device_type?: boolean } @@ -14772,52 +24992,96 @@ export interface IngestWeb { } export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { + /** A comma-separated list of geoip database configurations to delete */ id: Ids + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { + /** A comma-separated list of IP location database configurations. */ id: Ids + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestDeletePipelineRequest extends RequestBase { + /** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. + * To delete all ingest pipelines in a cluster, use a value of `*`. */ id: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } } export type IngestDeletePipelineResponse = AcknowledgedResponseBase export interface IngestGeoIpStatsGeoIpDownloadStatistics { + /** Total number of successful database downloads. */ successful_downloads: integer + /** Total number of failed database downloads. */ failed_downloads: integer + /** Total milliseconds spent downloading databases. */ total_download_time: DurationValue + /** Current number of databases available for use. */ databases_count: integer + /** Total number of database updates skipped. 
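+ * @example
+ * // These statistics are returned by the GeoIP stats API; a hedged sketch,
+ * // assuming `client` is an instance of this library's Client:
+ * // const { stats } = await client.ingest.geoIpStats()
+ * // console.log(stats.skipped_updates, stats.failed_downloads)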
*/ skipped_updates: integer + /** Total number of databases not updated after 30 days. */ expired_databases: integer } export interface IngestGeoIpStatsGeoIpNodeDatabaseName { + /** Name of the database. */ name: Name } export interface IngestGeoIpStatsGeoIpNodeDatabases { + /** Downloaded databases for the node. */ databases: IngestGeoIpStatsGeoIpNodeDatabaseName[] + /** Downloaded database files, including related license files. Elasticsearch stores these files in the node’s temporary directory: $ES_TMPDIR/geoip-databases/<node_id>. */ files_in_temp: string[] } export interface IngestGeoIpStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IngestGeoIpStatsResponse { + /** Download statistics for all GeoIP2 databases. */ stats: IngestGeoIpStatsGeoIpDownloadStatistics + /** Downloaded GeoIP2 databases for each node. */ nodes: Record } @@ -14829,7 +25093,14 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { } export interface IngestGetGeoipDatabaseRequest extends RequestBase { + /** A comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface IngestGetGeoipDatabaseResponse { @@ -14845,7 +25116,14 @@ export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { } export interface IngestGetIpLocationDatabaseRequest extends RequestBase { + /** Comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface IngestGetIpLocationDatabaseResponse { @@ -14853,14 +25131,28 @@ export interface IngestGetPipelineRequest extends RequestBase { + /** Comma-separated list of pipeline IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all ingest pipelines, omit this parameter or use `*`. */ id?: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Return pipelines without their definitions (default: false). */ summary?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } } export type IngestGetPipelineResponse = Record export interface IngestProcessorGrokRequest extends RequestBase { + /** All values in `body` will be added to the request body.
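+ * @example
+ * // A hedged sketch of issuing this request, assuming `client` is an
+ * // instance of this library's Client; the response is expected to carry
+ * // the built-in Grok patterns keyed by name:
+ * // const resp = await client.ingest.processorGrok()
+ * // console.log(Object.keys(resp.patterns).slice(0, 5))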
*/ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface IngestProcessorGrokResponse { @@ -14868,44 +25160,92 @@ export interface IngestProcessorGrokResponse { } export interface IngestPutGeoipDatabaseRequest extends RequestBase { + /** ID of the database configuration to create or update. */ id: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The provider-assigned name of the IP geolocation database to download. */ name: Name + /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. + * At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ maxmind: IngestMaxmind + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } } export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestPutIpLocationDatabaseRequest extends RequestBase { + /** The database configuration identifier. */ id: Id + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. + * A value of `-1` indicates that the request should never time out. */ timeout?: Duration configuration?: IngestDatabaseConfiguration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } } export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestPutPipelineRequest extends RequestBase { + /** ID of the ingest pipeline to create or update. */ id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
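+ * @example
+ * // A hedged end-to-end sketch of this request shape, assuming `client` is
+ * // an instance of this library's Client; the pipeline id and processor are
+ * // illustrative:
+ * // await client.ingest.putPipeline({
+ * //   id: 'my-pipeline',
+ * //   description: 'Lowercase the message field',
+ * //   processors: [{ lowercase: { field: 'message' } }],
+ * //   timeout: '30s'
+ * // })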
*/ timeout?: Duration + /** Required version for optimistic concurrency control for pipeline updates */ if_version?: VersionNumber + /** Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata + /** Description of the ingest pipeline. */ description?: string + /** Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. */ on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ version?: VersionNumber + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } } export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateRequest extends RequestBase { + /** The pipeline to test. + * If you don't specify a `pipeline` in the request body, this parameter is required. */ id?: Id + /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean + /** Sample documents to test in the pipeline. */ docs: IngestDocument[] + /** The pipeline to test. + * If you don't specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } } export interface IngestSimulateResponse { @@ -14930,8 +25270,14 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { + /** The period to wait for a connection to the master node. 
*/ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type LicenseDeleteResponse = AcknowledgedResponseBase @@ -14952,8 +25298,15 @@ export interface LicenseGetLicenseInformation { } export interface LicenseGetRequest extends RequestBase { + /** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. + * This parameter is deprecated and will always be set to true in 8.x. */ accept_enterprise?: boolean + /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */ local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { accept_enterprise?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { accept_enterprise?: never, local?: never } } export interface LicenseGetResponse { @@ -14961,6 +25314,10 @@ export interface LicenseGetResponse { } export interface LicenseGetBasicStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface LicenseGetBasicStatusResponse { @@ -14968,6 +25325,10 @@ export interface LicenseGetBasicStatusResponse { } export interface LicenseGetTrialStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface LicenseGetTrialStatusResponse { @@ -14980,11 +25341,19 @@ export interface LicensePostAcknowledgement { } export interface LicensePostRequest extends RequestBase { + /** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean + /** The period to wait for a connection to the master node. */ master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration license?: LicenseLicense + /** A sequence of one or more JSON documents containing the license information. */ licenses?: LicenseLicense[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } + /** All values in `querystring` will be added to the request querystring. 
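+ * @example
+ * // A hedged sketch of installing a license with this request type, assuming
+ * // `client` is an instance of this library's Client and `licenseJson` holds
+ * // the license document obtained from Elastic:
+ * // await client.license.post({ acknowledge: true, licenses: [licenseJson] })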
*/ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } } export interface LicensePostResponse { @@ -14994,9 +25363,16 @@ export interface LicensePostStartBasicRequest extends RequestBase { + /** Whether the user has acknowledged the license acknowledgement messages (default: false). */ acknowledge?: boolean + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } } export interface LicensePostStartBasicResponse { @@ -15008,9 +25384,16 @@ export interface LicensePostStartTrialRequest extends RequestBase { + /** Whether the user has acknowledged the license acknowledgement messages (default: false). */ acknowledge?: boolean + /** The type of trial license to generate (default: "trial"). */ type?: string + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } } export interface LicensePostStartTrialResponse { @@ -15021,11 +25404,21 @@ export interface LogstashPipeline { + /** A description of the pipeline. + * This description is not used by Elasticsearch or Logstash. */ description: string + /** The date the pipeline was last updated. + * It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ last_modified: DateTime + /** The configuration for the pipeline. */ pipeline: string + /** Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. */ pipeline_metadata: LogstashPipelineMetadata + /** Settings for the pipeline. + * It supports only flat keys in dot notation. */ pipeline_settings: LogstashPipelineSettings + /** The user who last updated the pipeline. */ username: string } @@ -15035,37 +25428,62 @@ export interface LogstashPipelineMetadata { } export interface LogstashPipelineSettings { + /** The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ 'pipeline.workers': integer + /** The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ 'pipeline.batch.size': integer + /** When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ 'pipeline.batch.delay': integer + /** The internal queuing model to use for event buffering. */ 'queue.type': string + /** The total capacity of the queue (`queue.type: persisted`) in number of bytes.
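+ * @example
+ * // A hedged sketch of a pipeline using these settings, assuming `client` is
+ * // an instance of this library's Client; all values, including the
+ * // `pipeline_metadata` contents, are illustrative:
+ * // await client.logstash.putPipeline({
+ * //   id: 'my-pipeline',
+ * //   pipeline: {
+ * //     description: 'Sample pipeline',
+ * //     last_modified: '2025-01-01T00:00:00.000Z',
+ * //     pipeline: 'input { stdin {} } output { stdout {} }',
+ * //     pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
+ * //     pipeline_settings: {
+ * //       'pipeline.workers': 1,
+ * //       'pipeline.batch.size': 125,
+ * //       'pipeline.batch.delay': 50,
+ * //       'queue.type': 'memory',
+ * //       'queue.max_bytes': '1gb',
+ * //       'queue.checkpoint.writes': 1024
+ * //     },
+ * //     username: 'elastic'
+ * //   }
+ * // })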
*/ 'queue.max_bytes': string + /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */ 'queue.checkpoint.writes': integer } export interface LogstashDeletePipelineRequest extends RequestBase { + /** An identifier for the pipeline. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type LogstashDeletePipelineResponse = boolean export interface LogstashGetPipelineRequest extends RequestBase { + /** A comma-separated list of pipeline identifiers. */ id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type LogstashGetPipelineResponse = Record export interface LogstashPutPipelineRequest extends RequestBase { + /** An identifier for the pipeline. */ id: Id pipeline?: LogstashPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, pipeline?: never } } export type LogstashPutPipelineResponse = boolean export interface MigrationDeprecationsDeprecation { + /** Optional details about the deprecation warning. */ details?: string + /** The level property describes the significance of the issue. */ level: MigrationDeprecationsDeprecationLevel + /** Descriptive information about the deprecation warning. */ message: string + /** A link to the breaking change documentation, where you can find more information about this change. */ url: string resolve_during_rolling_upgrade: boolean _meta?: Record @@ -15074,16 +25492,30 @@ export interface MigrationDeprecationsDeprecation { export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' export interface MigrationDeprecationsRequest extends RequestBase { + /** Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. */ index?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export interface MigrationDeprecationsResponse { + /** Cluster-level deprecation warnings. */ cluster_settings: MigrationDeprecationsDeprecation[] + /** Index warnings are sectioned off per index and can be filtered using an index-pattern in the query. + * This section includes warnings for the backing indices of data streams specified in the request path. */ index_settings: Record data_streams: Record + /** Node-level deprecation warnings. + * Since only a subset of your nodes might incorporate these settings, it is important to read the details section for more information about which nodes are affected. */ node_settings: MigrationDeprecationsDeprecation[] + /** Machine learning-related deprecation warnings.
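+ * @example
+ * // A hedged sketch of reading this response, assuming `client` is an
+ * // instance of this library's Client; the index pattern is illustrative:
+ * // const resp = await client.migration.deprecations({ index: 'logs-*' })
+ * // for (const d of resp.ml_settings) console.log(d.level, d.message)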
*/ ml_settings: MigrationDeprecationsDeprecation[] + /** Template warnings are sectioned off per template and include deprecations for both component templates and + * index templates. */ templates: Record + /** ILM policy warnings are sectioned off per policy. */ ilm_policies: Record } @@ -15103,6 +25535,10 @@ export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR' export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface MigrationGetFeatureUpgradeStatusResponse { @@ -15115,6 +25551,10 @@ export interface MigrationPostFeatureUpgradeMigrationFeature { } export interface MigrationPostFeatureUpgradeRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface MigrationPostFeatureUpgradeResponse { @@ -15124,72 +25564,151 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAdaptiveAllocationsSettings { + /** If true, adaptive_allocations is enabled */ enabled: boolean + /** Specifies the minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer + /** Specifies the maximum number of allocations to scale to. + * If set, it must be greater than or equal to min_number_of_allocations. */ max_number_of_allocations?: integer } export interface MlAnalysisConfig { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a + * whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ bucket_span?: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. 
For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ categorization_filters?: string[] + /** Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ detectors: MlDetector[] + /** A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers?: Field[] + /** The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ latency?: Duration + /** Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. 
*/ summary_count_field_name?: Field } export interface MlAnalysisConfigRead { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ bucket_span: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. + * This property cannot be used at the same time as `categorization_filters`. + * The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. + * The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. + * This property expects an array of regular expressions. + * The expressions are used to filter out matching sequences from the categorization field values. */ categorization_filters?: string[] + /** An array of detector configuration objects. + * Detector configuration objects specify which data fields a job analyzes. + * They also specify which analytical functions are used. + * You can specify multiple detectors for a job. */ detectors: MlDetectorRead[] + /** A comma separated list of influencer field names. + * Typically these can be the by, over, or partition fields that are used in the detector configuration. + * You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. + * When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers: Field[] + /** Advanced configuration option. + * Affects the pruning of models that have not been updated for the given time duration. + * The value must be set to a multiple of the `bucket_span`. + * If set too low, important information may be removed from the model. + * Typically, set to `30d` or longer. + * If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. + * For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** The size of the window in which to expect data that is out of time order. + * Defaults to no latency. + * If you specify a non-zero value, it must be greater than or equal to one second. */ latency?: Duration + /** This functionality is reserved for internal use. + * It is not supported for use in customer environments and is not subject to the support SLA of official GA features. + * If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. + * This property value is the name of the field that contains the count of raw data points that have been summarized. + * The same `summary_count_field_name` applies to all detectors in the job. 
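+ * @example
+ * // A hedged sketch of an analysis config for pre-summarized input; the
+ * // field names are illustrative, and `doc_count` stands for the field
+ * // carrying the number of raw data points behind each summarized document:
+ * // {
+ * //   bucket_span: '15m',
+ * //   detectors: [{ function: 'mean', field_name: 'responsetime' }],
+ * //   influencers: ['airline'],
+ * //   summary_count_field_name: 'doc_count'
+ * // }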
*/ summary_count_field_name?: Field } export interface MlAnalysisLimits { + /** The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ categorization_examples_limit?: long + /** The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ model_memory_limit: string } export interface MlAnomaly { + /** The actual value for the bucket. */ actual?: double[] + /** Information about the factors impacting the initial anomaly score. */ anomaly_score_explanation?: MlAnomalyExplanation + /** The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ bucket_span: DurationValue + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: string + /** The value of `by_field_name`. */ by_field_value?: string + /** For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */ causes?: MlAnomalyCause[] + /** A unique identifier for the detector. 
*/ detector_index: integer + /** Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ field_name?: string + /** The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ function?: string + /** The description of the function in which the anomaly occurs, as specified in the detector configuration. */ function_description?: string + /** If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ geo_results?: MlGeoResults + /** If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ influencers?: MlInfluence[] + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ initial_record_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: string + /** The value of `over_field_name`. */ over_field_value?: string + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: string + /** The value of `partition_field_name`. */ partition_field_value?: string + /** The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */ probability: double + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ record_score: double + /** Internal. This is always set to `record`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The typical value for the bucket, according to analytical modeling. */ typical?: double[] } @@ -15212,82 +25731,142 @@ export interface MlAnomalyCause { } export interface MlAnomalyExplanation { + /** Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ anomaly_characteristics_impact?: integer + /** Length of the detected anomaly in the number of buckets. */ anomaly_length?: integer + /** Type of the detected anomaly: `spike` or `dip`. */ anomaly_type?: string + /** Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ high_variance_penalty?: boolean + /** If the bucket contains fewer samples than expected, the score is reduced. */ incomplete_bucket_penalty?: boolean + /** Lower bound of the 95% confidence interval. 
*/ lower_confidence_bound?: double + /** Impact of the deviation between actual and typical values in the past 12 buckets. */ multi_bucket_impact?: integer + /** Impact of the deviation between actual and typical values in the current bucket. */ single_bucket_impact?: integer + /** Typical (expected) value for this bucket. */ typical_value?: double + /** Upper bound of the 95% confidence interval. */ upper_confidence_bound?: double } export interface MlApiKeyAuthorization { + /** The identifier for the API key. */ id: string + /** The name of the API key. */ name: string } export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' export interface MlBucketInfluencer { + /** A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as + * newer data is analyzed. */ anomaly_score: double + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The field name of the influencer. */ influencer_field_name: Field + /** The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision + * of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of + * this. */ probability: double + /** Internal. */ raw_anomaly_score: double + /** Internal. This value is always set to `bucket_influencer`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } export interface MlBucketSummary { + /** The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited + * score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as + * new data is analyzed. */ anomaly_score: double bucket_influencers: MlBucketInfluencer[] + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The number of input data records processed in this bucket. */ event_count: long + /** The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */ processing_time_ms: DurationValue + /** Internal. This value is always set to bucket. */ result_type: string + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp: EpochTime + /** The start time of the bucket. 
This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp_string?: DateTime } export interface MlCalendarEvent { + /** A string that uniquely identifies a calendar. */ calendar_id?: Id event_id?: Id + /** A description of the scheduled event. */ description: string + /** The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ end_time: DateTime + /** The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ start_time: DateTime } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition export interface MlCategorizationAnalyzerDefinition { + /** One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ char_filter?: AnalysisCharFilter[] + /** One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ filter?: AnalysisTokenFilter[] + /** The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */ tokenizer?: AnalysisTokenizer } export type MlCategorizationStatus = 'ok' | 'warn' export interface MlCategory { + /** A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ category_id: ulong + /** A list of examples of actual values that matched the category. */ examples: string[] + /** [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ grok_pattern?: GrokPattern + /** Identifier for the anomaly detection job. */ job_id: Id + /** The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. 
*/ max_matching_length: ulong + /** If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ partition_field_name?: string + /** If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ partition_field_value?: string + /** A regular expression that is used to search for values that match the category. */ regex: string + /** A space separated list of the common tokens that are matched in values of the category. */ terms: string + /** The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ num_matches?: long + /** A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ preferred_to_categories?: Id[] p?: string result_type: string @@ -15295,25 +25874,40 @@ export interface MlCategory { } export interface MlChunkingConfig { + /** If the mode is `auto`, the chunk size is dynamically calculated; + * this is the recommended value when the datafeed does not use aggregations. + * If the mode is `manual`, chunking is applied according to the specified `time_span`; + * use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ mode: MlChunkingMode + /** The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */ time_span?: Duration } export type MlChunkingMode = 'auto' | 'manual' | 'off' export interface MlClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer + /** Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ prediction_field_type?: string + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Specifies the field to which the top classes are written. Defaults to top_classes. */ top_classes_results_field?: string } export interface MlCommonTokenizationConfig { + /** Should the tokenizer lower case the text */ do_lower_case?: boolean + /** Maximum input sequence length for the model */ max_sequence_length?: integer + /** Tokenization spanning options. Special value of -1 indicates no spanning takes place */ span?: integer + /** Should tokenization input be automatically truncated before sending to the model for inference */ truncate?: MlTokenizationTruncate + /** Is tokenization completed with special tokens */ with_special_tokens?: boolean } @@ -15344,15 +25938,20 @@ export interface MlDataCounts { } export interface MlDataDescription { + /** Only JSON format is supported at this time. */ format?: string + /** The name of the field that contains the timestamp. */ time_field?: Field + /** The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. 
The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ time_format?: string field_delimiter?: string } export interface MlDatafeed { aggregations?: Record + /** @alias aggregations */ aggs?: Record + /** The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. */ authorization?: MlDatafeedAuthorization chunking_config?: MlChunkingConfig datafeed_id: Id @@ -15371,226 +25970,367 @@ export interface MlDatafeed { } export interface MlDatafeedAuthorization { + /** If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ service_account?: string } export interface MlDatafeedConfig { + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ datafeed_id?: Id + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. 
When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices + /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer } export interface MlDatafeedRunningState { + /** Indicates if the datafeed is "real-time"; meaning that the datafeed has no configured `end` time. */ real_time_configured: boolean + /** Indicates whether the datafeed has finished running on the available past data. + * For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ real_time_running: boolean + /** Provides the latest time interval the datafeed has searched. */ search_interval?: MlRunningStateSearchInterval } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { + /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. 
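 + *
 + * For example (hypothetical identifiers), `datafeed-total-requests` is valid, while
 + * `-total-requests-` is not because it does not start and end with an alphanumeric
 + * character. A minimal configuration sketch using such an identifier:
 + *
 + * ```ts
 + * const datafeed: MlDatafeedConfig = {
 + *   datafeed_id: 'datafeed-total-requests',
 + *   indices: ['server-metrics'], // wildcards and remote indices are also supported
 + *   query: { match_all: {} },
 + *   scroll_size: 1000
 + * }
 + * ```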
*/ datafeed_id: Id + /** For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact + /** The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ state: MlDatafeedState + /** An object that provides statistical information about the timing aspects of this datafeed. */ timing_stats?: MlDatafeedTimingStats + /** An object containing the running state for this datafeed. + * It is only provided if the datafeed is started. */ running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { + /** The number of buckets processed. */ bucket_count: long + /** The exponential average search time per hour, in milliseconds. */ exponential_average_search_time_per_hour_ms: DurationValue exponential_average_calculation_context?: MlExponentialAverageCalculationContext + /** Identifier for the anomaly detection job. */ job_id: Id + /** The number of searches run by the datafeed. */ search_count: long + /** The total time the datafeed spent searching, in milliseconds. */ total_search_time_ms: DurationValue + /** The average search time per bucket, in milliseconds. */ average_search_time_per_bucket_ms?: DurationValue } export interface MlDataframeAnalysis { + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ alpha?: double + /** Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as the continuous target variable. + * For classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. + * For regression analysis, the data type of the field must be numeric. */ dependent_variable: string + /** Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ downsample_factor?: double + /** Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is small. */ early_stopping_enabled?: boolean + /** Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error.
However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ eta?: double + /** Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ eta_growth_rate_per_tree?: double + /** Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double + /** Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ feature_processors?: MlDataframeAnalysisFeatureProcessor[] + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ gamma?: double + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ lambda?: double + /** Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. + * @alias max_trees */ maximum_number_trees?: integer + /** Advanced configuration option. 
Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ num_top_feature_importance_values?: integer + /** Defines the name of the prediction field in the results. Defaults to `_prediction`. */ prediction_field_name?: Field + /** Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ randomize_seed?: double + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer + /** Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double + /** Defines what percentage of the eligible documents will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ training_percent?: Percentage } export interface MlDataframeAnalysisAnalyzedFields { + /** An array of strings that defines the fields that will be included in the analysis. */ includes: string[] + /** An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to excludes, these fields are excluded from the analysis automatically. */ excludes: string[] } export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { class_assignment_objective?: string + /** Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ num_top_classes?: integer } export interface MlDataframeAnalysisContainer { + /** The configuration information necessary to perform classification. NOTE: Advanced parameters are for fine-tuning classification analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ classification?: MlDataframeAnalysisClassification + /** The configuration information necessary to perform outlier detection. */ outlier_detection?: MlDataframeAnalysisOutlierDetection + /** The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis.
They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ regression?: MlDataframeAnalysisRegression } export interface MlDataframeAnalysisFeatureProcessor { + /** The configuration information necessary to perform frequency encoding. */ frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + /** The configuration information necessary to perform multi encoding. It allows multiple processors to be chained together. This way the output of a processor can then be passed to another as an input. */ multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + /** The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: .. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + /** The configuration information necessary to perform one hot encoding. */ one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + /** The configuration information necessary to perform target mean encoding. */ target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding } export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + /** The resulting feature name. */ feature_name: Name field: Field + /** The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ frequency_map: Record } export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + /** The ordered array of custom processors to execute. It must contain more than one processor. */ processors: integer[] } export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + /** The feature name prefix. Defaults to ngram__. */ feature_prefix?: string + /** The name of the text field to encode. */ field: Field + /** Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ length?: integer + /** Specifies which n-grams to gather. It’s an array of integer values where the minimum value is 1 and the maximum value is 5. */ n_grams: integer[] + /** Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ start?: integer custom?: boolean } export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + /** The name of the field to encode. */ field: Field + /** The one hot map mapping the field value to the column name. */ hot_map: string } export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + /** The default value if field value is not found in the target_map. */ default_value: integer + /** The resulting feature name. */ feature_name: Name + /** The name of the field to encode. */ field: Field + /** The field value to target mean transition map. */ target_map: Record } export interface MlDataframeAnalysisOutlierDetection { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ feature_influence_threshold?: double + /** The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`.
The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */ standardization_enabled?: boolean } export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + /** The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */ loss_function?: string + /** A positive number that is used as a parameter to the `loss_function`. */ loss_function_parameter?: double } export interface MlDataframeAnalytics { + /** An object containing information about the analysis job. */ analysis_stats?: MlDataframeAnalyticsStatsContainer + /** For running jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */ data_counts: MlDataframeAnalyticsStatsDataCounts + /** The unique identifier of the data frame analytics job. */ id: Id + /** An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ memory_usage: MlDataframeAnalyticsStatsMemoryUsage + /** Contains properties for the node that runs the job. This information is available only for running jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes + /** The progress report of the data frame analytics job by phase. */ progress: MlDataframeAnalyticsStatsProgress[] + /** The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ state: MlDataframeState } export interface MlDataframeAnalyticsAuthorization { + /** If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the job, the account name is listed in the response. */ service_account?: string } export interface MlDataframeAnalyticsDestination { + /** Defines the destination index to store the results of the data frame analytics job. */ index: IndexName + /** Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */ results_field?: Field } export interface MlDataframeAnalyticsFieldSelection { + /** Whether the field is selected to be included in the analysis. 
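 + *
 + * For instance (hypothetical field names), a field is reported with `is_included: false`
 + * when the source selection excludes it or when its mapping type is unsupported:
 + *
 + * ```ts
 + * const source: MlDataframeAnalyticsSource = {
 + *   index: 'customer-data',
 + *   _source: { includes: ['age', 'income'], excludes: ['internal_id'] }
 + * }
 + * ```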
*/ is_included: boolean + /** Whether the field is required. */ is_required: boolean + /** The feature type of this field for the analysis. May be categorical or numerical. */ feature_type?: string + /** The mapping types of the field. */ mapping_types: string[] + /** The field name. */ name: Field + /** The reason a field is not selected to be included in the analysis. */ reason?: string } export interface MlDataframeAnalyticsMemoryEstimation { + /** Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk allows limiting the main memory needed to perform data frame analytics. */ expected_memory_with_disk: string + /** Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ expected_memory_without_disk: string } export interface MlDataframeAnalyticsSource { + /** Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ index: Indices + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ query?: QueryDslQueryContainer + /** Definitions of runtime fields that will become part of the mapping of the destination index. */ runtime_mappings?: MappingRuntimeFields + /** Specify `includes` and/or `excludes` patterns to select which fields will be present in the destination. Fields that are excluded cannot be included in the analysis. */ _source?: MlDataframeAnalysisAnalyzedFields | string[] } export interface MlDataframeAnalyticsStatsContainer { + /** An object containing information about the classification analysis job. */ classification_stats?: MlDataframeAnalyticsStatsHyperparameters + /** An object containing information about the outlier detection job. */ outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + /** An object containing information about the regression analysis. */ regression_stats?: MlDataframeAnalyticsStatsHyperparameters } export interface MlDataframeAnalyticsStatsDataCounts { + /** The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ skipped_docs_count: integer + /** The number of documents that are not used for training the model and can be used for testing. */ test_docs_count: integer + /** The number of documents that are used for training the model. */ training_docs_count: integer } export interface MlDataframeAnalyticsStatsHyperparameters { + /** An object containing the parameters of the classification analysis job. */ hyperparameters: MlHyperparameters + /** The number of iterations on the analysis. */ iteration: integer + /** The timestamp when the statistics were reported in milliseconds since the epoch.
*/ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats + /** An object containing information about validation loss. */ validation_loss: MlValidationLoss } export interface MlDataframeAnalyticsStatsMemoryUsage { + /** This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */ memory_reestimate_bytes?: long + /** The number of bytes used at the highest peak of memory usage. */ peak_usage_bytes: long + /** The memory usage status. */ status: string + /** The timestamp when memory usage was calculated. */ timestamp?: EpochTime } export interface MlDataframeAnalyticsStatsOutlierDetection { + /** The list of job parameters specified by the user or determined by algorithmic heuristics. */ parameters: MlOutlierDetectionParameters + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats } export interface MlDataframeAnalyticsStatsProgress { + /** Defines the phase of the data frame analytics job. */ phase: string + /** The progress that the data frame analytics job has made expressed in percentage. */ progress_percent: integer } @@ -15598,6 +26338,7 @@ export interface MlDataframeAnalyticsSummary { allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */ authorization?: MlDataframeAnalyticsAuthorization create_time?: EpochTime description?: string @@ -15611,69 +26352,100 @@ export interface MlDataframeAnalyticsSummary { } export interface MlDataframeEvaluationClassification { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field in the index which contains the predicted value, in other words the results of the classification analysis. */ predicted_field?: Field + /** The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ top_classes_field?: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationClassificationMetrics } export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + /** Accuracy of predictions (per-class and overall). */ accuracy?: Record + /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: Record } export interface MlDataframeEvaluationClassificationMetricsAucRoc { + /** Name of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ class_name?: Name + /** Whether or not the curve should be returned in addition to the score. Default value is false. 
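 + *
 + * A sketch (index fields and class name are illustrative) that requests the AUC ROC
 + * score together with the curve points for the `dog` class:
 + *
 + * ```ts
 + * const evaluation: MlDataframeEvaluationClassification = {
 + *   actual_field: 'animal_class',
 + *   predicted_field: 'ml.animal_class_prediction',
 + *   top_classes_field: 'ml.top_classes',
 + *   metrics: { auc_roc: { class_name: 'dog', include_curve: true } }
 + * }
 + * ```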
*/ include_curve?: boolean } export interface MlDataframeEvaluationContainer { + /** Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlDataframeEvaluationClassification + /** Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ outlier_detection?: MlDataframeEvaluationOutlierDetection + /** Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */ regression?: MlDataframeEvaluationRegression } export interface MlDataframeEvaluationMetrics { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + /** Precision of predictions (per-class and average). */ precision?: Record + /** Recall of predictions (per-class and average). */ recall?: Record } export interface MlDataframeEvaluationOutlierDetection { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ predicted_probability_field: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationOutlierDetectionMetrics } export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + /** The confusion matrix (true positive, false positive, true negative, and false negative counts) at the provided decision thresholds. */ confusion_matrix?: Record } export interface MlDataframeEvaluationRegression { + /** The field of the index which contains the ground truth. The data type of this field must be numerical. */ actual_field: Field + /** The field in the index that contains the predicted value, in other words the results of the regression analysis. */ predicted_field: Field + /** Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ metrics?: MlDataframeEvaluationRegressionMetrics } export interface MlDataframeEvaluationRegressionMetrics { + /** Average squared difference between the predicted values and the actual (ground truth) value. */ mse?: Record + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ msle?: MlDataframeEvaluationRegressionMetricsMsle + /** Pseudo Huber loss function. */ huber?: MlDataframeEvaluationRegressionMetricsHuber + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: Record + /** Approximates 1/2 (prediction - actual)² for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0.
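 + *
 + * In full, the Pseudo-Huber loss for a residual a = prediction - actual is
 + * delta² * (sqrt(1 + (a / delta)²) - 1), which behaves like 1/2 a² for small |a|
 + * and like a straight line with slope delta for large |a|.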
*/ delta?: double } export interface MlDataframeEvaluationRegressionMetricsMsle { + /** Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ offset?: double } export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' export interface MlDelayedDataCheckConfig { + /** The window of time that is searched for late data. This window of time ends with the latest finalized bucket. + * It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. + * In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */ check_window?: Duration + /** Specifies whether the datafeed periodically checks for delayed data. */ enabled: boolean } @@ -15682,40 +26454,82 @@ export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_alloca export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' export interface MlDetectionRule { + /** The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. */ actions?: MlRuleAction[] + /** An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */ conditions?: MlRuleCondition[] + /** A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ scope?: Record } export interface MlDetector { + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: Field + /** Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ detector_index?: integer + /** If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */ field_name?: Field + /** The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ function?: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. 
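 + *
 + * A population-analysis sketch (field names are illustrative): each `client_ip` is
 + * compared against the behavior of the whole population of clients:
 + *
 + * ```ts
 + * const detector: MlDetector = {
 + *   function: 'high_count',
 + *   over_field_name: 'client_ip',
 + *   detector_description: 'Clients sending an unusually high number of requests'
 + * }
 + * ```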
*/ over_field_name?: Field + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorRead { + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to their own history. + * It is used for finding unusual values in the context of the split. */ by_field_name?: Field + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index?: integer + /** Contains one of the following values: `all`, `none`, `by`, or `over`. + * If set, frequent entities are excluded from influencing the anomaly results. + * Entities can be considered frequent over time or frequent in a population. + * If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. + * If you use an event rate function such as `count` or `rare`, do not specify this field. */ field_name?: Field + /** The analysis function that is used. + * For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ function: string + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to the history of all splits. + * It is used for finding unusual values in the population of all splits. */ over_field_name?: Field + /** The field used to segment the analysis. + * When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorUpdate { + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index: integer + /** A description of the detector. */ description?: string + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] } @@ -15752,100 +26566,225 @@ export interface MlExponentialAverageCalculationContext { export type MlFeatureExtractor = MlQueryFeatureExtractor export interface MlFillMaskInferenceOptions { + /** The string/token which will be removed from incoming documents and replaced with the inference prediction(s). + * In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer + * has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. 
+ * However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, + * otherwise the request will fail. */ mask_token?: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlFilter { + /** A description of the filter. */ description?: string + /** A string that uniquely identifies a filter. */ filter_id: Id + /** An array of strings which is the filter item list. */ items: string[] } export interface MlFilterRef { + /** The identifier for the filter. */ filter_id: Id + /** If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ filter_type?: MlFilterType } export type MlFilterType = 'include' | 'exclude' export interface MlGeoResults { + /** The actual value for the bucket formatted as a `geo_point`. */ actual_point?: string + /** The typical value for the bucket formatted as a `geo_point`. */ typical_point?: string } export interface MlHyperparameter { + /** A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ absolute_importance?: double + /** Name of the hyperparameter. */ name: Name + /** A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ relative_importance?: double + /** Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ supplied: boolean + /** The value of the hyperparameter, either optimized or specified by the user. */ value: double } export interface MlHyperparameters { + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This parameter affects loss calculations by acting as a multiplier of the tree depth. + * Higher alpha values result in shallower trees and faster training times. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to zero. */ alpha?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. + * A high lambda value causes training to favor small leaf weights. 
+ * This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable.
+ * A small lambda value results in large individual trees and slower training.
+ * By default, this value is calculated during hyperparameter optimization.
+ * It must be a nonnegative value. */
 lambda?: double
+ /** Advanced configuration option.
+ * Regularization parameter to prevent overfitting on the training data set.
+ * Multiplies a linear penalty associated with the size of individual trees in the forest.
+ * A high gamma value causes training to prefer small trees.
+ * A small gamma value results in larger individual trees and slower training.
+ * By default, this value is calculated during hyperparameter optimization.
+ * It must be a nonnegative value. */
 gamma?: double
+ /** Advanced configuration option.
+ * The shrinkage applied to the weights.
+ * Smaller values result in larger forests which have a better generalization error.
+ * However, larger forests cause slower training.
+ * By default, this value is calculated during hyperparameter optimization.
+ * It must be a value between `0.001` and `1`. */
 eta?: double
+ /** Advanced configuration option.
+ * Specifies the rate at which `eta` increases for each new tree that is added to the forest.
+ * For example, a rate of 1.05 increases `eta` by 5% for each extra tree.
+ * By default, this value is calculated during hyperparameter optimization.
+ * It must be between `0.5` and `2`. */
 eta_growth_rate_per_tree?: double
+ /** Advanced configuration option.
+ * Defines the fraction of features that will be used when selecting a random bag for each candidate split.
+ * By default, this value is calculated during hyperparameter optimization. */
 feature_bag_fraction?: double
+ /** Advanced configuration option.
+ * Controls the fraction of data that is used to compute the derivatives of the loss function for tree training.
+ * A small value results in the use of a small fraction of the data.
+ * If this value is set to be less than 1, accuracy typically improves.
+ * However, too small a value may result in poor convergence for the ensemble and so require more trees.
+ * By default, this value is calculated during hyperparameter optimization.
+ * It must be greater than zero and less than or equal to 1. */
 downsample_factor?: double
+ /** If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many such consecutive failures are tolerated.
+ * Once the number of attempts exceeds the threshold, the forest training stops. */
 max_attempts_to_add_tree?: integer
+ /** Advanced configuration option.
+ * A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure.
+ * The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter.
+ * By default, this value is calculated during hyperparameter optimization. */
 max_optimization_rounds_per_hyperparameter?: integer
+ /** Advanced configuration option.
+ * Defines the maximum number of decision trees in the forest.
+ * The maximum value is 2000.
+ * By default, this value is calculated during hyperparameter optimization. */
 max_trees?: integer
+ /** The maximum number of folds for the cross-validation procedure.
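+ *
+ * Tuned hyperparameter values like these are reported in trained model metadata when
+ * requested. A minimal sketch of listing them; the model id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.getTrainedModels({ model_id: 'my-dfa-model', include: 'hyperparameters' })
+ * for (const h of res.trained_model_configs[0].metadata?.hyperparameters ?? []) {
+ *   console.log(h.name, h.value, h.supplied ? '(user supplied)' : '(optimized)')
+ * }
+ * ```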
*/ num_folds?: integer + /** Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ num_splits_per_feature?: integer + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer + /** Advanced configuration option. + * This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double } export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' export interface MlInferenceConfigCreateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceOptions + /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceOptions learning_to_rank?: MlLearningToRankConfig + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceOptions + /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceOptions + /** Question answering configuration for inference. */ question_answering?: MlQuestionAnsweringInferenceOptions } export interface MlInferenceConfigUpdateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceUpdateOptions + /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceUpdateOptions + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceUpdateOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceUpdateOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceUpdateOptions + /** Text expansion configuration for inference. 
*/
 text_expansion?: MlTextExpansionInferenceUpdateOptions
+ /** Question answering configuration for inference. */
 question_answering?: MlQuestionAnsweringInferenceUpdateOptions
 }

 export interface MlInferenceResponseResult {
+ /** If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */
 entities?: MlTrainedModelEntities[]
+ /** Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property
+ * is present only when it is true. */
 is_truncated?: boolean
+ /** If the model is trained for a text classification or zero shot classification task, the response is the
+ * predicted class.
+ * For named entity recognition (NER) tasks, it contains the annotated text output.
+ * For fill mask tasks, it contains the top prediction for replacing the mask token.
+ * For text embedding tasks, it contains the raw numerical text embedding values.
+ * For regression models, it's a numerical value.
+ * For classification models, it may be an integer, double, boolean, or string depending on the prediction type. */
 predicted_value?: MlPredictedValue | MlPredictedValue[]
+ /** For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted
+ * value. */
 predicted_value_sequence?: string
+ /** Specifies a probability for the predicted value. */
 prediction_probability?: double
+ /** Specifies a confidence score for the predicted value. */
 prediction_score?: double
+ /** For fill mask, text classification, and zero shot classification tasks, the response contains a list of top
+ * class entries. */
 top_classes?: MlTopClassEntry[]
+ /** If the request failed, the response contains the reason for the failure. */
 warning?: string
+ /** The feature importance for the inference results. Relevant only for classification or regression models. */
 feature_importance?: MlTrainedModelInferenceFeatureImportance[]
 }

@@ -15855,42 +26794,114 @@ export interface MlInfluence {
 }

 export interface MlInfluencer {
+ /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */
 bucket_span: DurationValue
+ /** A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated
+ * across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new
+ * data is analyzed. */
 influencer_score: double
+ /** The field name of the influencer. */
 influencer_field_name: Field
+ /** The entity that influenced, contributed to, or was to blame for the anomaly. */
 influencer_field_value: string
+ /** A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors.
+ * This is the initial value that was calculated at the time the bucket was processed. */
 initial_influencer_score: double
+ /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */
 is_interim: boolean
+ /** Identifier for the anomaly detection job. */
 job_id: Id
+ /** The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high
+ * precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly
+ * interpretation of this value. */
 probability: double
+ /** Internal. This value is always set to `influencer`.
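+ *
+ * Influencer records of this shape are returned by the get influencers API. A minimal
+ * sketch; the job id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.getInfluencers({ job_id: 'my-job' })
+ * for (const inf of res.influencers) {
+ *   console.log(inf.influencer_field_name, inf.influencer_field_value, inf.influencer_score)
+ * }
+ * ```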
*/
 result_type: string
+ /** The start time of the bucket for which these results were calculated. */
 timestamp: EpochTime
+ /** Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s
+ * analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This
+ * information enables you to filter the anomaly results more easily. */
 foo?: string
 }

 export interface MlJob {
+ /** Advanced configuration option.
+ * Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */
 allow_lazy_open: boolean
+ /** The analysis configuration, which specifies how to analyze the data.
+ * After you create a job, you cannot change the analysis configuration; all the properties are informational. */
 analysis_config: MlAnalysisConfig
+ /** Limits can be applied for the resources required to hold the mathematical models in memory.
+ * These limits are approximate and can be set per job.
+ * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */
 analysis_limits?: MlAnalysisLimits
+ /** Advanced configuration option.
+ * The time between each periodic persistence of the model.
+ * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time.
+ * The smallest allowed value is 1 hour. */
 background_persist_interval?: Duration
 blocked?: MlJobBlocked
 create_time?: DateTime
+ /** Advanced configuration option.
+ * Contains custom metadata about the job. */
 custom_settings?: MlCustomSettings
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies a period of time (in days) after which only the first snapshot per day is retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job.
+ * Valid values range from 0 to `model_snapshot_retention_days`. */
 daily_model_snapshot_retention_after_days?: long
+ /** The data description defines the format of the input data when you send data to the job by using the post data API.
+ * Note that when configuring a datafeed, these properties are automatically set.
+ * When data is received via the post data API, it is not stored in Elasticsearch.
+ * Only the results for anomaly detection are retained. */
 data_description: MlDataDescription
+ /** The datafeed, which retrieves data from Elasticsearch for analysis by the job.
+ * You can associate only one datafeed with each anomaly detection job. */
 datafeed_config?: MlDatafeed
+ /** Indicates that the process of deleting the job is in progress but not yet completed.
+ * It is only reported when `true`. */
 deleting?: boolean
+ /** A description of the job. */
 description?: string
+ /** If the job closed or failed, this is the time the job finished, otherwise it is `null`.
+ * This property is informational; you cannot change its value. */
 finished_time?: DateTime
+ /** A list of job groups.
+ * A job can belong to no groups or many. */
 groups?: string[]
+ /** Identifier for the anomaly detection job.
+ * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+ * It must start and end with alphanumeric characters. */
 job_id: Id
+ /** Reserved for future use, currently set to `anomaly_detector`. */
 job_type?: string
+ /** The machine learning configuration version number at which the job was created.
*/
 job_version?: VersionString
+ /** This advanced configuration option stores model information along with the results.
+ * It provides a more detailed view into anomaly detection.
+ * Model plot provides a simplified and indicative view of the model and its bounds. */
 model_plot_config?: MlModelPlotConfig
 model_snapshot_id?: Id
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies the maximum period of time (in days) that snapshots are retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job.
+ * By default, snapshots ten days older than the newest snapshot are deleted. */
 model_snapshot_retention_days: long
+ /** Advanced configuration option.
+ * The period over which adjustments to the score are applied, as new data is seen.
+ * The default value is the longer of 30 days or 100 `bucket_spans`. */
 renormalization_window_days?: long
+ /** A text string that affects the name of the machine learning results index.
+ * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */
 results_index_name: IndexName
+ /** Advanced configuration option.
+ * The period of time (in days) that results are retained.
+ * Age is calculated relative to the timestamp of the latest bucket result.
+ * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch.
+ * The default value is null, which means all results are retained.
+ * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results.
+ * Annotations added by users are retained forever. */
 results_retention_days?: long
 }

@@ -15902,22 +26913,66 @@ export interface MlJobBlocked {
 }

 export type MlJobBlockedReason = 'delete' | 'reset' | 'revert'

 export interface MlJobConfig {
+ /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */
 allow_lazy_open?: boolean
+ /** The analysis configuration, which specifies how to analyze the data.
+ * After you create a job, you cannot change the analysis configuration; all the properties are informational. */
 analysis_config: MlAnalysisConfig
+ /** Limits can be applied for the resources required to hold the mathematical models in memory.
+ * These limits are approximate and can be set per job.
+ * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */
 analysis_limits?: MlAnalysisLimits
+ /** Advanced configuration option.
+ * The time between each periodic persistence of the model.
+ * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time.
+ * The smallest allowed value is 1 hour. */
 background_persist_interval?: Duration
+ /** Advanced configuration option.
+ * Contains custom metadata about the job. */
 custom_settings?: MlCustomSettings
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies a period of time (in days) after which only the first snapshot per day is retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job.
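+ *
+ * A job configuration of this shape is passed to the create anomaly detection job API.
+ * A minimal sketch; the job id, field names, and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * await client.ml.putJob({
+ *   job_id: 'my-job',
+ *   analysis_config: {
+ *     bucket_span: '15m',
+ *     detectors: [{ function: 'mean', field_name: 'responsetime' }]
+ *   },
+ *   data_description: { time_field: '@timestamp' }
+ * })
+ * ```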
*/
 daily_model_snapshot_retention_after_days?: long
+ /** The data description defines the format of the input data when you send data to the job by using the post data API.
+ * Note that when configuring a datafeed, these properties are automatically set. */
 data_description: MlDataDescription
+ /** The datafeed, which retrieves data from Elasticsearch for analysis by the job.
+ * You can associate only one datafeed with each anomaly detection job. */
 datafeed_config?: MlDatafeedConfig
+ /** A description of the job. */
 description?: string
+ /** A list of job groups. A job can belong to no groups or many. */
 groups?: string[]
+ /** Identifier for the anomaly detection job.
+ * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+ * It must start and end with alphanumeric characters. */
 job_id?: Id
+ /** Reserved for future use, currently set to `anomaly_detector`. */
 job_type?: string
+ /** This advanced configuration option stores model information along with the results.
+ * It provides a more detailed view into anomaly detection.
+ * Model plot provides a simplified and indicative view of the model and its bounds. */
 model_plot_config?: MlModelPlotConfig
+ /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+ * It specifies the maximum period of time (in days) that snapshots are retained.
+ * This period is relative to the timestamp of the most recent snapshot for this job.
+ * The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */
 model_snapshot_retention_days?: long
+ /** Advanced configuration option.
+ * The period over which adjustments to the score are applied, as new data is seen.
+ * The default value is the longer of 30 days or 100 `bucket_spans`. */
 renormalization_window_days?: long
+ /** A text string that affects the name of the machine learning results index.
+ * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */
 results_index_name?: IndexName
+ /** Advanced configuration option.
+ * The period of time (in days) that results are retained.
+ * Age is calculated relative to the timestamp of the latest bucket result.
+ * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch.
+ * The default value is null, which means all results are retained.
+ * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results.
+ * Annotations added by users are retained forever. */
 results_retention_days?: long
 }

@@ -15940,15 +26995,30 @@ export interface MlJobStatistics {
 }

 export interface MlJobStats {
+ /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */
 assignment_explanation?: string
+ /** An object that describes the quantity of input to the job and any related error counts.
+ * The `data_count` values are cumulative for the lifetime of a job.
+ * If a model snapshot is reverted or old results are deleted, the job counts are not reset. */
 data_counts: MlDataCounts
+ /** An object that provides statistical information about forecasts belonging to this job.
+ * Some statistics are omitted if no forecasts have been made. */
 forecasts_stats: MlJobForecastStatistics
+ /** Identifier for the anomaly detection job.
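+ *
+ * Job statistics of this shape are returned by the get anomaly detection job stats API.
+ * A minimal sketch; the job id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.getJobStats({ job_id: 'my-job' })
+ * console.log(res.jobs[0].state, res.jobs[0].data_counts.processed_record_count)
+ * ```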
*/
 job_id: string
+ /** An object that provides information about the size and contents of the model. */
 model_size_stats: MlModelSizeStats
+ /** Contains properties for the node that runs the job.
+ * This information is available only for open jobs.
+ * @remarks This property is not supported on Elastic Cloud Serverless. */
 node?: MlDiscoveryNodeCompact
+ /** For open jobs only, the elapsed time for which the job has been open. */
 open_time?: DateTime
+ /** The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */
 state: MlJobState
+ /** An object that provides statistical information about the timing aspects of this job. */
 timing_stats: MlJobTimingStats
+ /** Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */
 deleting?: boolean
 }

@@ -15989,8 +27059,11 @@ export interface MlModelPackageConfig {
 }

 export interface MlModelPlotConfig {
+ /** If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */
 annotations_enabled?: boolean
+ /** If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */
 enabled?: boolean
+ /** Limits data collection to this comma-separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */
 terms?: Field
 }

@@ -16020,15 +27093,25 @@ export interface MlModelSizeStats {
 }

 export interface MlModelSnapshot {
+ /** An optional description of the model snapshot. */
 description?: string
+ /** A numerical character string that uniquely identifies the job that the snapshot was created for. */
 job_id: Id
+ /** The timestamp of the latest processed record. */
 latest_record_time_stamp?: integer
+ /** The timestamp of the latest bucket result. */
 latest_result_time_stamp?: integer
+ /** The minimum version required to be able to restore the model snapshot. */
 min_version: VersionString
+ /** Summary information describing the model. */
 model_size_stats?: MlModelSizeStats
+ /** If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */
 retain: boolean
+ /** For internal use only. */
 snapshot_doc_count: long
+ /** A numerical character string that uniquely identifies the model snapshot. */
 snapshot_id: Id
+ /** The creation timestamp for the snapshot. */
 timestamp: long
 }

@@ -16036,19 +27119,25 @@ export interface MlModelSnapshotUpgrade {
 job_id: Id
 snapshot_id: Id
 state: MlSnapshotUpgradeState
+ /** @remarks This property is not supported on Elastic Cloud Serverless. */
 node: MlDiscoveryNode
 assignment_explanation: string
 }

 export interface MlNerInferenceOptions {
+ /** The tokenization options */
 tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
+ /** The token classification labels.
Must be IOB formatted tags */ classification_labels?: string[] vocabulary?: MlVocabulary } export interface MlNerInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } @@ -16056,30 +27145,52 @@ export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig } export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { + /** Should the tokenizer prefix input with a space character */ add_prefix_space?: boolean } export interface MlNlpTokenizationUpdateOptions { + /** Truncate options to apply */ truncate?: MlTokenizationTruncate + /** Span options to apply */ span?: integer } export interface MlOutlierDetectionParameters { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. + * Value range: 0-1 */ feature_influence_threshold?: double + /** The method that outlier detection uses. + * Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. + * The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. + * When the value is not set, different values are used for different ensemble members. + * This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. + * For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ standardization_enabled?: boolean } export interface MlOverallBucket { + /** The length of the bucket in seconds. Matches the job with the longest bucket_span value. */ bucket_span: DurationValue + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** An array of objects that contain the max_anomaly_score per job_id. */ jobs: MlOverallBucketJob[] + /** The top_n average of the maximum bucket anomaly_score per job. */ overall_score: double + /** Internal. This is always set to overall_bucket. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } @@ -16089,23 +27200,31 @@ export interface MlOverallBucketJob { } export interface MlPage { + /** Skips the specified number of items. */ from?: integer + /** Specifies the maximum number of items to obtain. 
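+ *
+ * Page objects are accepted by several ML list APIs, for example get model snapshots.
+ * A minimal sketch; the job id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.getModelSnapshots({ job_id: 'my-job', page: { from: 0, size: 5 } })
+ * console.log(res.count, res.model_snapshots.length)
+ * ```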
*/ size?: integer } export interface MlPassThroughInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary?: MlVocabulary } export interface MlPassThroughInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlPerPartitionCategorization { + /** To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ enabled?: boolean + /** This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */ stop_on_warn?: boolean } @@ -16118,22 +27237,33 @@ export interface MlQueryFeatureExtractor { } export interface MlQuestionAnsweringInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** The maximum answer length to consider */ max_answer_length?: integer } export interface MlQuestionAnsweringInferenceUpdateOptions { + /** The question to answer given the inference context */ question: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** The maximum answer length to consider for extraction */ max_answer_length?: integer } export interface MlRegressionInferenceOptions { + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: Field + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer } @@ -16142,67 +27272,95 @@ export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'st export type MlRuleAction = 'skip_result' | 'skip_model_update' export interface MlRuleCondition { + /** Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ applies_to: MlAppliesTo + /** Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ operator: MlConditionOperator + /** The value that is compared against the `applies_to` field using the operator. 
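+ *
+ * A sketch of a custom rule that skips results when the actual value is below a threshold;
+ * the detector and field name are hypothetical:
+ * ```ts
+ * import { estypes } from '@elastic/elasticsearch'
+ * const detector: estypes.MlDetector = {
+ *   function: 'mean',
+ *   field_name: 'cpu_usage',
+ *   custom_rules: [{
+ *     actions: ['skip_result'],
+ *     conditions: [{ applies_to: 'actual', operator: 'lt', value: 10 }]
+ *   }]
+ * }
+ * ```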
*/
 value: double
 }

 export interface MlRunningStateSearchInterval {
+ /** The end time. */
 end?: Duration
+ /** The end time as an epoch in milliseconds. */
 end_ms: DurationValue
+ /** The start time. */
 start?: Duration
+ /** The start time as an epoch in milliseconds. */
 start_ms: DurationValue
 }

 export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'

 export interface MlTextClassificationInferenceOptions {
+ /** Specifies the number of top class predictions to return. Defaults to 0. */
 num_top_classes?: integer
+ /** The tokenization options */
 tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
+ /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */
 classification_labels?: string[]
 vocabulary?: MlVocabulary
 }

 export interface MlTextClassificationInferenceUpdateOptions {
+ /** Specifies the number of top class predictions to return. Defaults to 0. */
 num_top_classes?: integer
+ /** The tokenization options to update when inferring */
 tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
+ /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels. */
 classification_labels?: string[]
 }

 export interface MlTextEmbeddingInferenceOptions {
+ /** The number of dimensions in the embedding output */
 embedding_size?: integer
+ /** The tokenization options */
 tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
 vocabulary: MlVocabulary
 }

 export interface MlTextEmbeddingInferenceUpdateOptions {
 tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
 }

 export interface MlTextExpansionInferenceOptions {
+ /** The tokenization options */
 tokenization?: MlTokenizationConfigContainer
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
 vocabulary: MlVocabulary
 }

 export interface MlTextExpansionInferenceUpdateOptions {
 tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
 }

 export interface MlTimingStats {
+ /** Runtime of the analysis in milliseconds. */
 elapsed_time: DurationValue
+ /** Runtime of the latest iteration of the analysis in milliseconds.
*/
 iteration_time?: DurationValue
 }

 export interface MlTokenizationConfigContainer {
+ /** Indicates BERT tokenization and its options */
 bert?: MlNlpBertTokenizationConfig
+ /** Indicates BERT Japanese tokenization and its options */
 bert_ja?: MlNlpBertTokenizationConfig
+ /** Indicates MPNET tokenization and its options */
 mpnet?: MlNlpBertTokenizationConfig
+ /** Indicates RoBERTa tokenization and its options */
 roberta?: MlNlpRobertaTokenizationConfig
 xlm_roberta?: MlXlmRobertaTokenizationConfig
 }

@@ -16216,73 +27374,114 @@ export interface MlTopClassEntry {
 }

 export interface MlTotalFeatureImportance {
+ /** The feature for which this importance was calculated. */
 feature_name: Name
+ /** A collection of feature importance statistics related to the training data set for this particular feature. */
 importance: MlTotalFeatureImportanceStatistics[]
+ /** If the trained model is a classification model, feature importance statistics are gathered per target class value. */
 classes: MlTotalFeatureImportanceClass[]
 }

 export interface MlTotalFeatureImportanceClass {
+ /** The target class value. Could be a string, boolean, or number. */
 class_name: Name
+ /** A collection of feature importance statistics related to the training data set for this particular feature. */
 importance: MlTotalFeatureImportanceStatistics[]
 }

 export interface MlTotalFeatureImportanceStatistics {
+ /** The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */
 mean_magnitude: double
+ /** The maximum importance value across all the training data for this feature. */
 max: integer
+ /** The minimum importance value across all the training data for this feature. */
 min: integer
 }

 export interface MlTrainedModelAssignment {
 adaptive_allocations?: MlAdaptiveAllocationsSettings | null
+ /** The overall assignment state. */
 assignment_state: MlDeploymentAssignmentState
 max_assigned_allocations?: integer
 reason?: string
+ /** The allocation state for each node. */
 routing_table: Record
+ /** The timestamp when the deployment started. */
 start_time: DateTime
 task_parameters: MlTrainedModelAssignmentTaskParameters
 }

 export interface MlTrainedModelAssignmentRoutingStateAndReason {
+ /** The reason for the current state. It is usually populated only when the
+ * `routing_state` is `failed`. */
 reason?: string
+ /** The current routing state. */
 routing_state: MlRoutingState
 }

 export interface MlTrainedModelAssignmentRoutingTable {
+ /** The reason for the current state. It is usually populated only when the
+ * `routing_state` is `failed`. */
 reason?: string
+ /** The current routing state. */
 routing_state: MlRoutingState
+ /** Current number of allocations. */
 current_allocations: integer
+ /** Target number of allocations. */
 target_allocations: integer
 }

 export interface MlTrainedModelAssignmentTaskParameters {
+ /** The size of the trained model in bytes. */
 model_bytes: ByteSize
+ /** The unique identifier for the trained model. */
 model_id: Id
+ /** The unique identifier for the trained model deployment. */
 deployment_id: Id
+ /** The size of the trained model cache. */
 cache_size?: ByteSize
+ /** The total number of allocations this model is assigned across ML nodes. */
 number_of_allocations: integer
 priority: MlTrainingPriority
 per_deployment_memory_bytes: ByteSize
 per_allocation_memory_bytes: ByteSize
+ /** The number of inference requests allowed in the queue at a time.
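+ *
+ * The queue size is set when starting a deployment. A minimal sketch; the model id and
+ * node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * await client.ml.startTrainedModelDeployment({
+ *   model_id: 'my-model',
+ *   number_of_allocations: 1,
+ *   threads_per_allocation: 1,
+ *   queue_capacity: 1024
+ * })
+ * ```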
*/ queue_capacity: integer + /** Number of threads per allocation. */ threads_per_allocation: integer } export interface MlTrainedModelConfig { + /** Identifier for the trained model. */ model_id: Id + /** The model type */ model_type?: MlTrainedModelType + /** A comma delimited string of tags. A trained model can have many tags, or none. */ tags: string[] + /** The Elasticsearch version number in which the trained model was created. */ version?: VersionString compressed_definition?: string + /** Information on the creator of the trained model. */ created_by?: string + /** The time when the trained model was created. */ create_time?: DateTime + /** Any field map described in the inference configuration takes precedence. */ default_field_map?: Record + /** The free-text description of the trained model. */ description?: string + /** The estimated heap usage in bytes to keep the trained model in memory. */ estimated_heap_memory_usage_bytes?: integer + /** The estimated number of operations to use the trained model. */ estimated_operations?: integer + /** True if the full model definition is present. */ fully_defined?: boolean + /** The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. */ input: MlTrainedModelConfigInput + /** The license level of the trained model. */ license_level?: string + /** An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */ metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize model_package?: MlModelPackageConfig @@ -16292,61 +27491,101 @@ export interface MlTrainedModelConfig { } export interface MlTrainedModelConfigInput { + /** An array of input field names for the model. */ field_names: Field[] } export interface MlTrainedModelConfigMetadata { model_aliases?: string[] + /** An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ feature_importance_baseline?: Record + /** List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ hyperparameters?: MlHyperparameter[] + /** An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. */ total_feature_importance?: MlTotalFeatureImportance[] } export interface MlTrainedModelDeploymentAllocationStatus { + /** The current number of nodes where the model is allocated. */ allocation_count: integer + /** The detailed allocation state related to the nodes. */ state: MlDeploymentAllocationState + /** The desired number of nodes for model allocation. */ target_allocation_count: integer } export interface MlTrainedModelDeploymentNodesStats { + /** The average time for each inference call to complete on this node. 
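+ *
+ * Per-node deployment stats of this shape are returned by the get trained models stats API.
+ * A minimal sketch; the model id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.getTrainedModelsStats({ model_id: 'my-model' })
+ * for (const n of res.trained_model_stats[0].deployment_stats?.nodes ?? []) {
+ *   console.log(n.average_inference_time_ms, n.inference_count)
+ * }
+ * ```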
*/
 average_inference_time_ms?: DurationValue
 average_inference_time_ms_last_minute?: DurationValue
+ /** The average time for each inference call to complete on this node, excluding cache hits. */
 average_inference_time_ms_excluding_cache_hits?: DurationValue
+ /** The number of errors when evaluating the trained model. */
 error_count?: integer
+ /** The total number of inference calls made against this node for this model. */
 inference_count?: long
 inference_cache_hit_count?: long
 inference_cache_hit_count_last_minute?: long
+ /** The epoch time stamp of the last inference call for the model on this node. */
 last_access?: EpochTime
+ /** Information pertaining to the node.
+ * @remarks This property is not supported on Elastic Cloud Serverless. */
 node?: MlDiscoveryNode
+ /** The number of allocations assigned to this node. */
 number_of_allocations?: integer
+ /** The number of inference requests queued to be processed. */
 number_of_pending_requests?: integer
 peak_throughput_per_minute: long
+ /** The number of inference requests that were not processed because the queue was full. */
 rejected_execution_count?: integer
+ /** The current routing state and reason for the current routing state for this allocation. */
 routing_state: MlTrainedModelAssignmentRoutingStateAndReason
+ /** The epoch timestamp when the allocation started. */
 start_time?: EpochTime
+ /** The number of threads used by each allocation during inference. */
 threads_per_allocation?: integer
 throughput_last_minute: integer
+ /** The number of inference requests that timed out before being processed. */
 timeout_count?: integer
 }

 export interface MlTrainedModelDeploymentStats {
 adaptive_allocations?: MlAdaptiveAllocationsSettings
+ /** The detailed allocation status for the deployment. */
 allocation_status?: MlTrainedModelDeploymentAllocationStatus
 cache_size?: ByteSize
+ /** The unique identifier for the trained model deployment. */
 deployment_id: Id
+ /** The sum of `error_count` for all nodes in the deployment. */
 error_count?: integer
+ /** The sum of `inference_count` for all nodes in the deployment. */
 inference_count?: integer
+ /** The unique identifier for the trained model. */
 model_id: Id
+ /** The deployment stats for each node that currently has the model allocated.
+ * In serverless, stats are reported for a single unnamed virtual node. */
 nodes: MlTrainedModelDeploymentNodesStats[]
+ /** The number of allocations requested. */
 number_of_allocations?: integer
 peak_throughput_per_minute: long
 priority: MlTrainingPriority
+ /** The number of inference requests that can be queued before new requests are rejected. */
 queue_capacity?: integer
+ /** The sum of `rejected_execution_count` for all nodes in the deployment.
+ * Individual nodes reject an inference request if the inference queue is full.
+ * The queue size is controlled by the `queue_capacity` setting in the start
+ * trained model deployment API. */
 rejected_execution_count?: integer
+ /** The reason for the current deployment state. Usually only populated when
+ * the model is not deployed to a node. */
 reason?: string
+ /** The epoch timestamp when the deployment started. */
 start_time: EpochTime
+ /** The overall state of the deployment. */
 state?: MlDeploymentAssignmentState
+ /** The number of threads used by each allocation during inference. */
 threads_per_allocation?: integer
+ /** The sum of `timeout_count` for all nodes in the deployment.
*/ timeout_count?: integer } @@ -16370,10 +27609,19 @@ export interface MlTrainedModelInferenceFeatureImportance { } export interface MlTrainedModelInferenceStats { + /** The number of times the model was loaded for inference and was not retrieved from the cache. + * If this number is close to the `inference_count`, the cache is not being appropriately used. + * This can be solved by increasing the cache size or its time-to-live (TTL). + * Refer to general machine learning settings for the appropriate settings. */ cache_miss_count: integer + /** The number of failures when using the model for inference. */ failure_count: integer + /** The total number of times the model has been called for inference. + * This is across all inference contexts, including all pipelines. */ inference_count: integer + /** The number of inference calls where all the training features for the model were missing. */ missing_all_fields_count: integer + /** The time when the statistics were last updated. */ timestamp: EpochTime } @@ -16386,21 +27634,33 @@ export interface MlTrainedModelLocationIndex { } export interface MlTrainedModelPrefixStrings { + /** String prepended to input at ingest */ ingest?: string + /** String prepended to input at search */ search?: string } export interface MlTrainedModelSizeStats { + /** The size of the model in bytes. */ model_size_bytes: ByteSize + /** The amount of memory required to load the model in bytes. */ required_native_memory_bytes: ByteSize } export interface MlTrainedModelStats { + /** A collection of deployment stats, which is present when the models are deployed. */ deployment_stats?: MlTrainedModelDeploymentStats + /** A collection of inference stats fields. */ inference_stats?: MlTrainedModelInferenceStats + /** A collection of ingest stats for the model across all nodes. + * The values are summations of the individual node statistics. + * The format matches the ingest section in the nodes stats API. */ ingest?: Record + /** The unique identifier of the trained model. */ model_id: Id + /** A collection of model size stats. */ model_size_stats: MlTrainedModelSizeStats + /** The number of ingest pipelines that currently refer to the model. */ pipeline_count: integer } @@ -16409,13 +27669,18 @@ export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' export type MlTrainingPriority = 'normal' | 'low' export interface MlTransformAuthorization { + /** If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the transform, the account name is listed in the response. */ service_account?: string } export interface MlValidationLoss { + /** Validation loss values for every added decision tree during the forest growing procedure. */ fold_values: string[] + /** The type of the loss metric. For example, binomial_logistic. 
*/
 loss_type: string
 }

@@ -16427,23 +27692,39 @@ export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConf
 }

 export interface MlZeroShotClassificationInferenceOptions {
+ /** The tokenization options to update when inferring */
 tokenization?: MlTokenizationConfigContainer
+ /** Hypothesis template used when tokenizing labels for prediction */
 hypothesis_template?: string
+ /** The zero shot classification labels indicating entailment, neutral, and contradiction.
+ * Must contain exactly and only entailment, neutral, and contradiction. */
 classification_labels: string[]
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
+ /** Indicates if more than one true label exists. */
 multi_label?: boolean
+ /** The labels to predict. */
 labels?: string[]
 }

 export interface MlZeroShotClassificationInferenceUpdateOptions {
+ /** The tokenization options to update when inferring */
 tokenization?: MlNlpTokenizationUpdateOptions
+ /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */
 results_field?: string
+ /** Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */
 multi_label?: boolean
+ /** The labels to predict. */
 labels: string[]
 }

 export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase {
+ /** The unique identifier of the trained model. */
 model_id: Id
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { model_id?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { model_id?: never }
 }

 export interface MlClearTrainedModelDeploymentCacheResponse {
@@ -16451,10 +27732,18 @@ export interface MlClearTrainedModelDeploymentCacheResponse {
 }

 export interface MlCloseJobRequest extends RequestBase {
+ /** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */
 job_id: Id
+ /** Refer to the description for the `allow_no_match` query parameter. */
 allow_no_match?: boolean
+ /** Refer to the description for the `force` query parameter. */
 force?: boolean
+ /** Refer to the description for the `timeout` query parameter. */
 timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never }
 }

 export interface MlCloseJobResponse {
@@ -16462,48 +27751,96 @@ export interface MlCloseJobResponse {
 }

 export interface MlDeleteCalendarRequest extends RequestBase {
+ /** A string that uniquely identifies a calendar. */
 calendar_id: Id
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { calendar_id?: never }
+ /** All values in `querystring` will be added to the request querystring.
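+ *
+ * A minimal sketch of deleting a calendar; the calendar id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * await client.ml.deleteCalendar({ calendar_id: 'planned-outages' })
+ * ```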
*/ + querystring?: { [key: string]: any } & { calendar_id?: never } } export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** Identifier for the scheduled event. + * You can obtain this identifier by using the get calendar events API. */ event_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, event_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, event_id?: never } } export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a + * comma-separated list of jobs or groups. */ job_id: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never } } export interface MlDeleteCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. */ id: Id + /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ force?: boolean + /** The time to wait for the job to be deleted. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, force?: never, timeout?: never } } export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. This + * identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It must start and end with alphanumeric + * characters. */ datafeed_id: Id + /** Use to forcefully delete a started datafeed; this method is quicker than + * stopping and deleting the datafeed. */ force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, force?: never } } export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { + /** Identifier for an anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. */ job_id?: Id + /** The desired requests per second for the deletion processes. The default + * behavior is no throttling. 
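+ *
+ * A minimal sketch of a throttled cleanup; the job id and node address are hypothetical:
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const res = await client.ml.deleteExpiredData({ job_id: 'my-job', requests_per_second: 100 })
+ * console.log(res.deleted)
+ * ```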
*/
 requests_per_second?: float
+ /** How long the underlying delete processes can run before they are canceled. */
 timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never }
 }

 export interface MlDeleteExpiredDataResponse {
@@ -16511,55 +27848,124 @@ export interface MlDeleteExpiredDataResponse {
 }

 export interface MlDeleteFilterRequest extends RequestBase {
+ /** A string that uniquely identifies a filter. */
 filter_id: Id
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { filter_id?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { filter_id?: never }
 }

 export type MlDeleteFilterResponse = AcknowledgedResponseBase

 export interface MlDeleteForecastRequest extends RequestBase {
+ /** Identifier for the anomaly detection job. */
 job_id: Id
+ /** A comma-separated list of forecast identifiers. If you do not specify
+ * this optional parameter or if you specify `_all` or `*`, the API deletes
+ * all forecasts from the job. */
 forecast_id?: Id
+ /** Specifies whether an error occurs when there are no forecasts. In
+ * particular, if this parameter is set to `false` and there are no
+ * forecasts associated with the job, attempts to delete all forecasts
+ * return an error. */
 allow_no_forecasts?: boolean
+ /** Specifies the period of time to wait for the completion of the delete
+ * operation. When this period of time elapses, the API fails and returns an
+ * error. */
 timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never }
 }

 export type MlDeleteForecastResponse = AcknowledgedResponseBase

 export interface MlDeleteJobRequest extends RequestBase {
+ /** Identifier for the anomaly detection job. */
 job_id: Id
+ /** Use to forcefully delete an opened job; this method is quicker than
+ * closing and deleting the job. */
 force?: boolean
+ /** Specifies whether annotations that have been added by the
+ * user should be deleted along with any auto-generated annotations when the job is
+ * reset. */
 delete_user_annotations?: boolean
+ /** Specifies whether the request should return immediately or wait until the
+ * job deletion completes. */
 wait_for_completion?: boolean
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never }
 }

 export type MlDeleteJobResponse = AcknowledgedResponseBase

 export interface MlDeleteModelSnapshotRequest extends RequestBase {
+ /** Identifier for the anomaly detection job.
 export interface MlDeleteModelSnapshotRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** Identifier for the model snapshot. */
   snapshot_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never }
 }

 export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase

 export interface MlDeleteTrainedModelRequest extends RequestBase {
+  /** The unique identifier of the trained model. */
   model_id: Id
+  /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */
   force?: boolean
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { model_id?: never, force?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { model_id?: never, force?: never, timeout?: never }
 }

 export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase

 export interface MlDeleteTrainedModelAliasRequest extends RequestBase {
+  /** The model alias to delete. */
   model_alias: Name
+  /** The trained model ID to which the model alias refers. */
   model_id: Id
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never }
 }

 export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase

 export interface MlEstimateModelMemoryRequest extends RequestBase {
+  /** For a list of the properties that you can specify in the
+   * `analysis_config` component of the body of this API, refer to the create
+   * anomaly detection jobs API. */
   analysis_config?: MlAnalysisConfig
+  /** Estimates of the highest cardinality in a single bucket that is observed
+   * for influencer fields over the time period that the job analyzes data.
+   * To produce a good answer, values must be provided for all influencer
+   * fields. Providing values for fields that are not listed as `influencers`
+   * has no effect on the estimation. */
   max_bucket_cardinality?: Record<Field, long>
+  /** Estimates of the cardinality that is observed for fields over the whole
+   * time period that the job analyzes data. To produce a good answer, values
+   * must be provided for fields referenced in the `by_field_name`,
+   * `over_field_name` and `partition_field_name` of any detectors. Providing
+   * values for other fields has no effect on the estimation. It can be
+   * omitted from the request if no detectors have a `by_field_name`,
+   * `over_field_name` or `partition_field_name`. */
   overall_cardinality?: Record<Field, long>
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never }
 }
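A sketch of `estimateModelMemory` in use; the detector configuration and cardinality figures are invented for illustration.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Estimate how much memory an anomaly detection job would need, given a
// candidate analysis config and rough field cardinalities.
const estimate = await client.ml.estimateModelMemory({
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'responsetime', by_field_name: 'clientip' }]
  },
  max_bucket_cardinality: { clientip: 1_000 },
  overall_cardinality: { clientip: 50_000 }
})
console.log(estimate.model_memory_estimate)
```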
 export interface MlEstimateModelMemoryResponse {
@@ -16579,17 +27985,27 @@ export interface MlEvaluateDataFrameConfusionMatrixPrediction {
 }

 export interface MlEvaluateDataFrameConfusionMatrixThreshold {
+  /** True Positive */
   tp: integer
+  /** False Positive */
   fp: integer
+  /** True Negative */
   tn: integer
+  /** False Negative */
   fn: integer
 }

 export interface MlEvaluateDataFrameDataframeClassificationSummary {
+  /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve.
+   * It is calculated for a specific class (provided as "class_name") treated as positive. */
   auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc
+  /** Accuracy of predictions (per-class and overall). */
   accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy
+  /** Multiclass confusion matrix. */
   multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix
+  /** Precision of predictions (per-class and average). */
   precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision
+  /** Recall of predictions (per-class and average). */
   recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall
 }

@@ -16632,67 +28048,141 @@ export interface MlEvaluateDataFrameDataframeEvaluationValue {
 }

 export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary {
+  /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. */
   auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc
+  /** Set the different thresholds of the outlier score at where the metric is calculated. */
   precision?: Record<string, double>
+  /** Set the different thresholds of the outlier score at where the metric is calculated. */
   recall?: Record<string, double>
+  /** Set the different thresholds of the outlier score at where the metrics (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated. */
   confusion_matrix?: Record<string, MlEvaluateDataFrameConfusionMatrixThreshold>
 }

 export interface MlEvaluateDataFrameDataframeRegressionSummary {
+  /** Pseudo Huber loss function. */
   huber?: MlEvaluateDataFrameDataframeEvaluationValue
+  /** Average squared difference between the predicted values and the actual (`ground truth`) value. */
   mse?: MlEvaluateDataFrameDataframeEvaluationValue
+  /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (`ground truth`) value. */
   msle?: MlEvaluateDataFrameDataframeEvaluationValue
+  /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */
   r_squared?: MlEvaluateDataFrameDataframeEvaluationValue
 }

 export interface MlEvaluateDataFrameRequest extends RequestBase {
+  /** Defines the type of evaluation you want to perform. */
   evaluation: MlDataframeEvaluationContainer
+  /** Defines the `index` in which the evaluation will be performed. */
   index: IndexName
+  /** A query clause that retrieves a subset of data from the source index. */
   query?: QueryDslQueryContainer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { evaluation?: never, index?: never, query?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { evaluation?: never, index?: never, query?: never }
 }

 export interface MlEvaluateDataFrameResponse {
+  /** Evaluation results for a classification analysis.
+   * It outputs a prediction that identifies to which of the classes each document belongs. */
   classification?: MlEvaluateDataFrameDataframeClassificationSummary
+  /** Evaluation results for an outlier detection analysis.
+   * It outputs the probability that each document is an outlier. */
   outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary
+  /** Evaluation results for a regression analysis which outputs a prediction of values. */
   regression?: MlEvaluateDataFrameDataframeRegressionSummary
 }
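A sketch of `evaluateDataFrame` for an outlier detection evaluation; the index and field names are assumptions.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evaluate outlier detection results that a data frame analytics job has
// already written to a destination index.
const evaluation = await client.ml.evaluateDataFrame({
  index: 'my-analytics-dest-index',
  evaluation: {
    outlier_detection: {
      actual_field: 'is_outlier',
      predicted_probability_field: 'ml.outlier_score'
    }
  }
})
console.log(evaluation.outlier_detection?.auc_roc?.value)
```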
 export interface MlExplainDataFrameAnalyticsRequest extends RequestBase {
+  /** Identifier for the data frame analytics job. This identifier can contain
+   * lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+   * underscores. It must start and end with alphanumeric characters. */
   id?: Id
+  /** The configuration of how to source the analysis data. It requires an
+   * index. Optionally, query and _source may be specified. */
   source?: MlDataframeAnalyticsSource
+  /** The destination configuration, consisting of index and optionally
+   * results_field (ml by default). */
   dest?: MlDataframeAnalyticsDestination
+  /** The analysis configuration, which contains the information necessary to
+   * perform one of the following types of analysis: classification, outlier
+   * detection, or regression. */
   analysis?: MlDataframeAnalysisContainer
+  /** A description of the job. */
   description?: string
+  /** The approximate maximum amount of memory resources that are permitted for
+   * analytical processing. If your `elasticsearch.yml` file contains an
+   * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to
+   * create data frame analytics jobs that have `model_memory_limit` values
+   * greater than that setting. */
   model_memory_limit?: string
+  /** The maximum number of threads to be used by the analysis. Using more
+   * threads may decrease the time necessary to complete the analysis at the
+   * cost of using more CPU. Note that the process may use additional threads
+   * for operational functionality other than the analysis itself. */
   max_num_threads?: integer
+  /** Specify includes and/or excludes patterns to select which fields will be
+   * included in the analysis. The patterns specified in excludes are applied
+   * last, therefore excludes takes precedence. In other words, if the same
+   * field is specified in both includes and excludes, then the field will not
+   * be included in the analysis. */
   analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+  /** Specifies whether this job can start when there is insufficient machine
+   * learning node capacity for it to be immediately assigned to a node. */
   allow_lazy_start?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never }
 }

 export interface MlExplainDataFrameAnalyticsResponse {
+  /** An array of objects that explain selection for each field, sorted by the field names. */
   field_selection: MlDataframeAnalyticsFieldSelection[]
+  /** An object containing the memory estimates. */
   memory_estimation: MlDataframeAnalyticsMemoryEstimation
 }

 export interface MlFlushJobRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** Refer to the description for the `advance_time` query parameter. */
   advance_time?: DateTime
+  /** Refer to the description for the `calc_interim` query parameter. */
   calc_interim?: boolean
+  /** Refer to the description for the `end` query parameter. */
   end?: DateTime
+  /** Refer to the description for the `skip_time` query parameter. */
   skip_time?: DateTime
+  /** Refer to the description for the `start` query parameter. */
   start?: DateTime
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never }
 }

 export interface MlFlushJobResponse {
   flushed: boolean
+  /** Provides the timestamp (in milliseconds since the epoch) of the end of
+   * the last bucket that was processed. */
   last_finalized_bucket_end?: integer
 }

 export interface MlForecastRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. The job must be open when you
+   * create a forecast; otherwise, an error occurs. */
   job_id: Id
+  /** Refer to the description for the `duration` query parameter. */
   duration?: Duration
+  /** Refer to the description for the `expires_in` query parameter. */
   expires_in?: Duration
+  /** Refer to the description for the `max_model_memory` query parameter. */
   max_model_memory?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never }
 }

 export interface MlForecastResponse {
@@ -16701,18 +28191,34 @@
 }
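A sketch of requesting a forecast for an open job; the job name and durations are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create a 3-day forecast that is kept for two weeks.
const forecast = await client.ml.forecast({
  job_id: 'my-anomaly-job',
  duration: '3d',
  expires_in: '14d'
})
console.log(forecast.forecast_id)
```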
 export interface MlGetBucketsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** The timestamp of a single bucket result. If you do not specify this
+   * parameter, the API returns information about all buckets. */
   timestamp?: DateTime
+  /** Skips the specified number of buckets. */
   from?: integer
+  /** Specifies the maximum number of buckets to obtain. */
   size?: integer
+  /** Refer to the description for the `anomaly_score` query parameter. */
   anomaly_score?: double
+  /** Refer to the description for the `desc` query parameter. */
   desc?: boolean
+  /** Refer to the description for the `end` query parameter. */
   end?: DateTime
+  /** Refer to the description for the `exclude_interim` query parameter. */
   exclude_interim?: boolean
+  /** Refer to the description for the `expand` query parameter. */
   expand?: boolean
   page?: MlPage
+  /** Refer to the description for the `sort` query parameter. */
   sort?: Field
+  /** Refer to the description for the `start` query parameter. */
   start?: DateTime
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never }
 }

 export interface MlGetBucketsResponse {
@@ -16721,12 +28227,22 @@ export interface MlGetCalendarEventsRequest extends RequestBase {
+  /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */
   calendar_id: Id
+  /** Specifies to get events with timestamps earlier than this time. */
   end?: DateTime
+  /** Skips the specified number of events. */
   from?: integer
+  /** Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. */
   job_id?: Id
+  /** Specifies the maximum number of events to obtain. */
   size?: integer
+  /** Specifies to get events with timestamps after this time. */
   start?: DateTime
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never }
 }

 export interface MlGetCalendarEventsResponse {
@@ -16735,16 +28251,27 @@ export interface MlGetCalendarsCalendar {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** A description of the calendar. */
   description?: string
+  /** An array of anomaly detection job identifiers. */
   job_ids: Id[]
 }

 export interface MlGetCalendarsRequest extends RequestBase {
+  /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */
   calendar_id?: Id
+  /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */
   from?: integer
+  /** Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. */
   size?: integer
+  /** This object is supported only when you omit the calendar identifier. */
   page?: MlPage
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never }
 }
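A sketch of paging through bucket results with `getBuckets`; the thresholds are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch the first 100 buckets with an anomaly score of 75 or higher,
// skipping interim results.
const buckets = await client.ml.getBuckets({
  job_id: 'my-anomaly-job',
  anomaly_score: 75,
  exclude_interim: true,
  page: { from: 0, size: 100 }
})
console.log(buckets.count, buckets.buckets.length)
```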
 export interface MlGetCalendarsResponse {
@@ -16753,12 +28280,27 @@ export interface MlGetCategoriesRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** Identifier for the category, which is unique in the job. If you specify
+   * neither the category ID nor the partition_field_value, the API returns
+   * information about all categories. If you specify only the
+   * partition_field_value, it returns information about all categories for
+   * the specified partition. */
   category_id?: CategoryId
+  /** Skips the specified number of categories. */
   from?: integer
+  /** Only return categories for the specified partition. */
   partition_field_value?: string
+  /** Specifies the maximum number of categories to obtain. */
   size?: integer
+  /** Configures pagination.
+   * This parameter has the `from` and `size` properties. */
   page?: MlPage
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never }
 }

 export interface MlGetCategoriesResponse {
@@ -16767,34 +28309,97 @@ export interface MlGetDataFrameAnalyticsRequest extends RequestBase {
+  /** Identifier for the data frame analytics job. If you do not specify this
+   * option, the API returns information for the first hundred data frame
+   * analytics jobs. */
   id?: Id
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no data frame analytics
+   * jobs that match.
+   * 2. Contains the `_all` string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value returns an empty data_frame_analytics array when there
+   * are no matches and the subset of results when there are partial matches.
+   * If this parameter is `false`, the request returns a 404 status code when
+   * there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Skips the specified number of data frame analytics jobs. */
   from?: integer
+  /** Specifies the maximum number of data frame analytics jobs to obtain. */
   size?: integer
+  /** Indicates if certain fields should be removed from the configuration on
+   * retrieval. This allows the configuration to be in an acceptable format to
+   * be retrieved and then added to another cluster. */
   exclude_generated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
 }

 export interface MlGetDataFrameAnalyticsResponse {
   count: integer
+  /** An array of data frame analytics job resources, which are sorted by the id value in ascending order. */
   data_frame_analytics: MlDataframeAnalyticsSummary[]
 }

 export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase {
+  /** Identifier for the data frame analytics job. If you do not specify this
+   * option, the API returns information for the first hundred data frame
+   * analytics jobs. */
   id?: Id
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no data frame analytics
+   * jobs that match.
+   * 2. Contains the `_all` string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value returns an empty data_frame_analytics array when there
+   * are no matches and the subset of results when there are partial matches.
+   * If this parameter is `false`, the request returns a 404 status code when
+   * there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Skips the specified number of data frame analytics jobs. */
   from?: integer
+  /** Specifies the maximum number of data frame analytics jobs to obtain. */
   size?: integer
+  /** Defines whether the stats response should be verbose. */
   verbose?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never }
 }

 export interface MlGetDataFrameAnalyticsStatsResponse {
   count: long
+  /** An array of objects that contain usage information for data frame analytics jobs, which are sorted by the id value in ascending order. */
   data_frame_analytics: MlDataframeAnalytics[]
 }
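A sketch of `getDataFrameAnalyticsStats` across all jobs, tolerating wildcard misses.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List usage stats for every data frame analytics job; `allow_no_match`
// avoids a 404 when nothing matches.
const stats = await client.ml.getDataFrameAnalyticsStats({
  id: '_all',
  allow_no_match: true,
  verbose: false
})
for (const job of stats.data_frame_analytics) {
  console.log(job.id, job.state)
}
```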
 export interface MlGetDatafeedStatsRequest extends RequestBase {
+  /** Identifier for the datafeed. It can be a datafeed identifier or a
+   * wildcard expression. If you do not specify one of these options, the API
+   * returns information about all datafeeds. */
   datafeed_id?: Ids
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no datafeeds that match.
+   * 2. Contains the `_all` string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value is `true`, which returns an empty `datafeeds` array
+   * when there are no matches and the subset of results when there are
+   * partial matches. If this parameter is `false`, the request returns a
+   * `404` status code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never }
 }

 export interface MlGetDatafeedStatsResponse {
@@ -16803,9 +28408,29 @@ export interface MlGetDatafeedsRequest extends RequestBase {
+  /** Identifier for the datafeed. It can be a datafeed identifier or a
+   * wildcard expression. If you do not specify one of these options, the API
+   * returns information about all datafeeds. */
   datafeed_id?: Ids
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no datafeeds that match.
+   * 2. Contains the `_all` string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value is `true`, which returns an empty `datafeeds` array
+   * when there are no matches and the subset of results when there are
+   * partial matches. If this parameter is `false`, the request returns a
+   * `404` status code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Indicates if certain fields should be removed from the configuration on
+   * retrieval. This allows the configuration to be in an acceptable format to
+   * be retrieved and then added to another cluster. */
   exclude_generated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never }
 }

 export interface MlGetDatafeedsResponse {
@@ -16814,9 +28439,16 @@ export interface MlGetFiltersRequest extends RequestBase {
+  /** A string that uniquely identifies a filter. */
   filter_id?: Ids
+  /** Skips the specified number of filters. */
   from?: integer
+  /** Specifies the maximum number of filters to obtain. */
   size?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { filter_id?: never, from?: never, size?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { filter_id?: never, from?: never, size?: never }
 }

 export interface MlGetFiltersResponse {
@@ -16825,26 +28457,66 @@ export interface MlGetInfluencersRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** If true, the results are sorted in descending order. */
   desc?: boolean
+  /** Returns influencers with timestamps earlier than this time.
+   * The default value means it is unset and results are not limited to
+   * specific timestamps. */
   end?: DateTime
+  /** If true, the output excludes interim results. By default, interim results
+   * are included. */
   exclude_interim?: boolean
+  /** Returns influencers with anomaly scores greater than or equal to this
+   * value. */
   influencer_score?: double
+  /** Skips the specified number of influencers. */
   from?: integer
+  /** Specifies the maximum number of influencers to obtain. */
   size?: integer
+  /** Specifies the sort field for the requested influencers. By default, the
+   * influencers are sorted by the `influencer_score` value. */
   sort?: Field
+  /** Returns influencers with timestamps after this time. The default value
+   * means it is unset and results are not limited to specific timestamps. */
   start?: DateTime
+  /** Configures pagination.
+   * This parameter has the `from` and `size` properties. */
   page?: MlPage
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never }
 }

 export interface MlGetInfluencersResponse {
   count: long
+  /** Array of influencer objects */
   influencers: MlInfluencer[]
 }
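A sketch of `getInfluencers` for the most anomalous influencers; the score and paging values are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch the top influencers with a score of at least 90, highest first.
const influencers = await client.ml.getInfluencers({
  job_id: 'my-anomaly-job',
  influencer_score: 90,
  desc: true,
  page: { from: 0, size: 25 }
})
for (const inf of influencers.influencers) {
  console.log(inf.influencer_field_name, inf.influencer_score)
}
```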
 export interface MlGetJobStatsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. It can be a job identifier, a
+   * group name, a comma-separated list of jobs, or a wildcard expression. If
+   * you do not specify one of these options, the API returns information for
+   * all anomaly detection jobs. */
   job_id?: Id
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no jobs that match.
+   * 2. Contains the _all string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * If `true`, the API returns an empty `jobs` array when
+   * there are no matches and the subset of results when there are partial
+   * matches. If `false`, the API returns a `404` status
+   * code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never }
 }

 export interface MlGetJobStatsResponse {
@@ -16853,9 +28525,29 @@ export interface MlGetJobsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. It can be a job identifier, a
+   * group name, or a wildcard expression. If you do not specify one of these
+   * options, the API returns information for all anomaly detection jobs. */
   job_id?: Ids
+  /** Specifies what to do when the request:
+   *
+   * 1. Contains wildcard expressions and there are no jobs that match.
+   * 2. Contains the _all string or no identifiers and there are no matches.
+   * 3. Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value is `true`, which returns an empty `jobs` array when
+   * there are no matches and the subset of results when there are partial
+   * matches. If this parameter is `false`, the request returns a `404` status
+   * code when there are no matches or only partial matches. */
   allow_no_match?: boolean
+  /** Indicates if certain fields should be removed from the configuration on
+   * retrieval. This allows the configuration to be in an acceptable format to
+   * be retrieved and then added to another cluster. */
   exclude_generated?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never }
 }

 export interface MlGetJobsResponse {
@@ -16864,49 +28556,87 @@ export interface MlGetMemoryStatsJvmStats {
+  /** Maximum amount of memory available for use by the heap. */
   heap_max?: ByteSize
+  /** Maximum amount of memory, in bytes, available for use by the heap. */
   heap_max_in_bytes: integer
+  /** Amount of Java heap currently being used for caching inference models. */
   java_inference?: ByteSize
+  /** Amount of Java heap, in bytes, currently being used for caching inference models. */
   java_inference_in_bytes: integer
+  /** Maximum amount of Java heap to be used for caching inference models. */
   java_inference_max?: ByteSize
+  /** Maximum amount of Java heap, in bytes, to be used for caching inference models. */
   java_inference_max_in_bytes: integer
 }

 export interface MlGetMemoryStatsMemMlStats {
+  /** Amount of native memory set aside for anomaly detection jobs. */
   anomaly_detectors?: ByteSize
+  /** Amount of native memory, in bytes, set aside for anomaly detection jobs. */
   anomaly_detectors_in_bytes: integer
+  /** Amount of native memory set aside for data frame analytics jobs. */
   data_frame_analytics?: ByteSize
+  /** Amount of native memory, in bytes, set aside for data frame analytics jobs. */
   data_frame_analytics_in_bytes: integer
+  /** Maximum amount of native memory (separate to the JVM heap) that may be used by machine learning native processes. */
   max?: ByteSize
+  /** Maximum amount of native memory (separate to the JVM heap), in bytes, that may be used by machine learning native processes. */
   max_in_bytes: integer
+  /** Amount of native memory set aside for loading machine learning native code shared libraries. */
   native_code_overhead?: ByteSize
+  /** Amount of native memory, in bytes, set aside for loading machine learning native code shared libraries. */
   native_code_overhead_in_bytes: integer
+  /** Amount of native memory set aside for trained models that have a PyTorch model_type. */
   native_inference?: ByteSize
+  /** Amount of native memory, in bytes, set aside for trained models that have a PyTorch model_type. */
   native_inference_in_bytes: integer
 }

 export interface MlGetMemoryStatsMemStats {
+  /** If the amount of physical memory has been overridden using the es.total_memory_bytes system property
+   * then this reports the overridden value. Otherwise it reports the same value as total. */
   adjusted_total?: ByteSize
+  /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property
+   * then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */
   adjusted_total_in_bytes: integer
+  /** Total amount of physical memory. */
   total?: ByteSize
+  /** Total amount of physical memory in bytes. */
   total_in_bytes: integer
+  /** Contains statistics about machine learning use of native memory on the node. */
   ml: MlGetMemoryStatsMemMlStats
 }

 export interface MlGetMemoryStatsMemory {
   attributes: Record<string, string>
+  /** Contains Java Virtual Machine (JVM) statistics for the node. */
   jvm: MlGetMemoryStatsJvmStats
+  /** Contains statistics about memory usage for the node. */
   mem: MlGetMemoryStatsMemStats
+  /** Human-readable identifier for the node. Based on the Node name setting. */
   name: Name
+  /** Roles assigned to the node. */
   roles: string[]
+  /** The host and port where transport HTTP connections are accepted. */
   transport_address: TransportAddress
   ephemeral_id: Id
 }

 export interface MlGetMemoryStatsRequest extends RequestBase {
+  /** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or
+   * `ml:true` */
   node_id?: Id
+  /** Period to wait for a connection to the master node. If no response is received before the timeout
+   * expires, the request fails and returns an error. */
   master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request
+   * fails and returns an error. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never }
 }
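A sketch of `getMemoryStats` scoped to ML nodes; the node filter is illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Report ML native memory usage per node, targeting only ML-capable nodes.
const memStats = await client.ml.getMemoryStats({
  node_id: 'ml:true',
  timeout: '30s'
})
for (const [id, node] of Object.entries(memStats.nodes)) {
  console.log(id, node.mem.ml.max_in_bytes)
}
```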
 export interface MlGetMemoryStatsResponse {
@@ -16916,9 +28646,26 @@ export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
+   * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`,
+   * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */
   snapshot_id: Id
+  /** Specifies what to do when the request:
+   *
+   * - Contains wildcard expressions and there are no jobs that match.
+   * - Contains the _all string or no identifiers and there are no matches.
+   * - Contains wildcard expressions and there are only partial matches.
+   *
+   * The default value is true, which returns an empty jobs array when there are no matches and the subset of results
+   * when there are partial matches. If this parameter is false, the request returns a 404 status code when there are
+   * no matches or only partial matches. */
   allow_no_match?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never }
 }

 export interface MlGetModelSnapshotUpgradeStatsResponse {
@@ -16927,15 +28674,29 @@ export interface MlGetModelSnapshotsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
+   * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`,
+   * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */
   snapshot_id?: Id
+  /** Skips the specified number of snapshots. */
   from?: integer
+  /** Specifies the maximum number of snapshots to obtain. */
   size?: integer
+  /** Refer to the description for the `desc` query parameter. */
   desc?: boolean
+  /** Refer to the description for the `end` query parameter. */
   end?: DateTime
   page?: MlPage
+  /** Refer to the description for the `sort` query parameter. */
   sort?: Field
+  /** Refer to the description for the `start` query parameter. */
   start?: DateTime
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never }
 }

 export interface MlGetModelSnapshotsResponse {
@@ -16944,32 +28705,63 @@ export interface MlGetOverallBucketsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. It can be a job identifier, a
+   * group name, a comma-separated list of jobs or groups, or a wildcard
+   * expression.
+   *
+   * You can summarize the bucket results for all anomaly detection jobs by
+   * using `_all` or by specifying `*` as the `<job_id>`. */
   job_id: Id
+  /** Refer to the description for the `allow_no_match` query parameter. */
   allow_no_match?: boolean
+  /** Refer to the description for the `bucket_span` query parameter. */
   bucket_span?: Duration
+  /** Refer to the description for the `end` query parameter. */
   end?: DateTime
+  /** Refer to the description for the `exclude_interim` query parameter. */
   exclude_interim?: boolean
+  /** Refer to the description for the `overall_score` query parameter. */
   overall_score?: double | string
+  /** Refer to the description for the `start` query parameter. */
   start?: DateTime
+  /** Refer to the description for the `top_n` query parameter. */
   top_n?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never }
 }

 export interface MlGetOverallBucketsResponse {
   count: long
+  /** Array of overall bucket objects */
   overall_buckets: MlOverallBucket[]
 }

 export interface MlGetRecordsRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** Skips the specified number of records. */
   from?: integer
+  /** Specifies the maximum number of records to obtain. */
   size?: integer
+  /** Refer to the description for the `desc` query parameter. */
   desc?: boolean
+  /** Refer to the description for the `end` query parameter. */
   end?: DateTime
+  /** Refer to the description for the `exclude_interim` query parameter. */
   exclude_interim?: boolean
   page?: MlPage
+  /** Refer to the description for the `record_score` query parameter. */
   record_score?: double
+  /** Refer to the description for the `sort` query parameter. */
   sort?: Field
+  /** Refer to the description for the `start` query parameter. */
   start?: DateTime
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never }
 }
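A sketch of `getRecords` for high-scoring anomaly records; the values are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Fetch the 50 highest-scoring anomaly records above a score of 80.
const records = await client.ml.getRecords({
  job_id: 'my-anomaly-job',
  record_score: 80,
  sort: 'record_score',
  desc: true,
  page: { from: 0, size: 50 }
})
console.log(records.count)
```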
 export interface MlGetRecordsResponse {
@@ -16978,39 +28770,98 @@ export interface MlGetTrainedModelsRequest extends RequestBase {
+  /** The unique identifier of the trained model or a model alias.
+   *
+   * You can get information for multiple trained models in a single API
+   * request by using a comma-separated list of model IDs or a wildcard
+   * expression. */
   model_id?: Ids
+  /** Specifies what to do when the request:
+   *
+   * - Contains wildcard expressions and there are no models that match.
+   * - Contains the _all string or no identifiers and there are no matches.
+   * - Contains wildcard expressions and there are only partial matches.
+   *
+   * If true, it returns an empty array when there are no matches and the
+   * subset of results when there are partial matches. */
   allow_no_match?: boolean
+  /** Specifies whether the included model definition should be returned as a
+   * JSON map (true) or in a custom compressed format (false). */
   decompress_definition?: boolean
+  /** Indicates if certain fields should be removed from the configuration on
+   * retrieval. This allows the configuration to be in an acceptable format to
+   * be retrieved and then added to another cluster. */
   exclude_generated?: boolean
+  /** Skips the specified number of models. */
   from?: integer
+  /** A comma-delimited string of optional fields to include in the response
+   * body. */
   include?: MlInclude
+  /** This parameter is deprecated. Use `include=definition` instead. */
   include_model_definition?: boolean
+  /** Specifies the maximum number of models to obtain. */
   size?: integer
+  /** A comma-delimited string of tags. A trained model can have many tags, or
+   * none. When supplied, only trained models that contain all the supplied
+   * tags are returned. */
   tags?: string | string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never }
 }

 export interface MlGetTrainedModelsResponse {
   count: integer
+  /** An array of trained model resources, which are sorted by the model_id value in ascending order. */
   trained_model_configs: MlTrainedModelConfig[]
 }

 export interface MlGetTrainedModelsStatsRequest extends RequestBase {
+  /** The unique identifier of the trained model or a model alias. It can be a
+   * comma-separated list or a wildcard expression. */
   model_id?: Ids
+  /** Specifies what to do when the request:
+   *
+   * - Contains wildcard expressions and there are no models that match.
+   * - Contains the _all string or no identifiers and there are no matches.
+   * - Contains wildcard expressions and there are only partial matches.
+   *
+   * If true, it returns an empty array when there are no matches and the
+   * subset of results when there are partial matches. */
   allow_no_match?: boolean
+  /** Skips the specified number of models. */
   from?: integer
+  /** Specifies the maximum number of models to obtain. */
   size?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never }
 }

 export interface MlGetTrainedModelsStatsResponse {
+  /** The total number of trained model statistics that matched the requested ID patterns. Could be higher than the number of items in the trained_model_stats array as the size of the array is restricted by the supplied size parameter. */
   count: integer
+  /** An array of trained model statistics, which are sorted by the model_id value in ascending order. */
   trained_model_stats: MlTrainedModelStats[]
 }

 export interface MlInferTrainedModelRequest extends RequestBase {
+  /** The unique identifier of the trained model. */
   model_id: Id
+  /** Controls the amount of time to wait for inference results. */
   timeout?: Duration
+  /** An array of objects to pass to the model for inference. The objects should contain fields matching your
+   * configured trained model input. Typically, for NLP models, the field name is `text_field`.
+   * Currently, for NLP models, only a single value is allowed. */
   docs: Record<string, any>[]
+  /** The inference configuration updates to apply on the API call */
   inference_config?: MlInferenceConfigUpdateContainer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never }
 }
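A sketch of `inferTrainedModel` against a deployed NLP model; the model ID is hypothetical, and `text_field` is the conventional input field name for NLP models.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Run inference on a single document; each entry in `docs` must match the
// model's configured input field.
const inference = await client.ml.inferTrainedModel({
  model_id: 'my-ner-model',
  docs: [{ text_field: 'Elastic is headquartered in Amsterdam.' }],
  timeout: '30s'
})
console.log(inference.inference_results)
```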
 export interface MlInferTrainedModelResponse {
@@ -17048,6 +28899,10 @@ export interface MlInfoNativeCode {
 }

 export interface MlInfoRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
 }

 export interface MlInfoResponse {
@@ -17058,18 +28913,32 @@ export interface MlOpenJobRequest extends RequestBase {
+  /** Identifier for the anomaly detection job. */
   job_id: Id
+  /** Refer to the description for the `timeout` query parameter. */
   timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, timeout?: never }
 }

 export interface MlOpenJobResponse {
   opened: boolean
+  /** The ID of the node that the job was started on. In serverless, this will be "serverless".
+   * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */
   node: NodeId
 }

 export interface MlPostCalendarEventsRequest extends RequestBase {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */
   events: MlCalendarEvent[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { calendar_id?: never, events?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { calendar_id?: never, events?: never }
 }

 export interface MlPostCalendarEventsResponse {
@@ -17077,10 +28946,17 @@ export interface MlPostDataRequest<TData = unknown> extends RequestBase {
+  /** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */
   job_id: Id
+  /** Specifies the end of the bucket resetting range. */
   reset_end?: DateTime
+  /** Specifies the start of the bucket resetting range. */
   reset_start?: DateTime
   data?: TData[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never }
 }

 export interface MlPostDataResponse {
@@ -17113,60 +28989,161 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
 }
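A sketch of `postData`, whose request type is generic over the submitted document shape; the `Reading` interface and values are invented.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Shape of the documents we stream to the job; hypothetical.
interface Reading { timestamp: number, responsetime: number }

// Send two documents to an open anomaly detection job.
const posted = await client.ml.postData<Reading>({
  job_id: 'my-anomaly-job',
  data: [
    { timestamp: 1713744000000, responsetime: 132.4 },
    { timestamp: 1713744060000, responsetime: 128.9 }
  ]
})
console.log(posted.processed_record_count)
```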
 export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase {
+  /** Identifier for the data frame analytics job. */
   id?: Id
+  /** A data frame analytics config as described in create data frame analytics
+   * jobs. Note that `id` and `dest` don’t need to be provided in the context of
+   * this API. */
   config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, config?: never }
 }

 export interface MlPreviewDataFrameAnalyticsResponse {
+  /** An array of objects that contain feature name and value pairs. The features have been processed and indicate what will be sent to the model for training. */
   feature_values: Record<Field, string>[]
 }

 export interface MlPreviewDatafeedRequest extends RequestBase {
+  /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase
+   * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric
+   * characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job
+   * configuration details in the request body. */
   datafeed_id?: Id
+  /** The start time from where the datafeed preview should begin */
   start?: DateTime
+  /** The end time when the datafeed preview should stop */
   end?: DateTime
+  /** The datafeed definition to preview. */
   datafeed_config?: MlDatafeedConfig
+  /** The configuration details for the anomaly detection job that is associated with the datafeed. If the
+   * `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must
+   * supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is
+   * used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */
   job_config?: MlJobConfig
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never }
 }

 export type MlPreviewDatafeedResponse<TDocument = unknown> = TDocument[]

 export interface MlPutCalendarRequest extends RequestBase {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** An array of anomaly detection job identifiers. */
   job_ids?: Id[]
+  /** A description of the calendar. */
   description?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never }
 }

 export interface MlPutCalendarResponse {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** A description of the calendar. */
   description?: string
+  /** A list of anomaly detection job identifiers or group names. */
   job_ids: Ids
 }

 export interface MlPutCalendarJobRequest extends RequestBase {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */
   job_id: Ids
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never }
 }

 export interface MlPutCalendarJobResponse {
+  /** A string that uniquely identifies a calendar. */
   calendar_id: Id
+  /** A description of the calendar. */
   description?: string
+  /** A list of anomaly detection job identifiers or group names. */
   job_ids: Ids
 }
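A sketch of `previewDatafeed`, typed via its `TDocument` generic; the `LogDoc` shape and datafeed ID are assumptions.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Expected shape of the previewed documents; hypothetical.
interface LogDoc { '@timestamp': string, responsetime: number }

// Preview what an existing datafeed would retrieve over a bounded window.
const preview = await client.ml.previewDatafeed<LogDoc>({
  datafeed_id: 'datafeed-my-anomaly-job',
  start: '2024-01-01T00:00:00Z',
  end: '2024-01-02T00:00:00Z'
})
console.log(preview.length)
```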
 export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
+  /** Identifier for the data frame analytics job. This identifier can contain
+   * lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+   * underscores. It must start and end with alphanumeric characters. */
   id: Id
+  /** Specifies whether this job can start when there is insufficient machine
+   * learning node capacity for it to be immediately assigned to a node. If
+   * set to `false` and a machine learning node with capacity to run the job
+   * cannot be immediately found, the API returns an error. If set to `true`,
+   * the API does not return an error; the job waits in the `starting` state
+   * until sufficient machine learning node capacity is available. This
+   * behavior is also affected by the cluster-wide
+   * `xpack.ml.max_lazy_ml_nodes` setting. */
   allow_lazy_start?: boolean
+  /** The analysis configuration, which contains the information necessary to
+   * perform one of the following types of analysis: classification, outlier
+   * detection, or regression. */
   analysis: MlDataframeAnalysisContainer
+  /** Specifies `includes` and/or `excludes` patterns to select which fields
+   * will be included in the analysis. The patterns specified in `excludes`
+   * are applied last, therefore `excludes` takes precedence. In other words,
+   * if the same field is specified in both `includes` and `excludes`, then
+   * the field will not be included in the analysis. If `analyzed_fields` is
+   * not set, only the relevant fields will be included. For example, all the
+   * numeric fields for outlier detection.
+   * The supported fields vary for each type of analysis. Outlier detection
+   * requires numeric or `boolean` data to analyze. The algorithms don’t
+   * support missing values therefore fields that have data types other than
+   * numeric or boolean are ignored. Documents where included fields contain
+   * missing values, null values, or an array are also ignored. Therefore the
+   * `dest` index may contain documents that don’t have an outlier score.
+   * Regression supports fields that are numeric, `boolean`, `text`,
+   * `keyword`, and `ip` data types. It is also tolerant of missing values.
+   * Fields that are supported are included in the analysis, other fields are
+   * ignored. Documents where included fields contain an array with two or
+   * more values are also ignored. Documents in the `dest` index that don’t
+   * contain a results field are not included in the regression analysis.
+   * Classification supports fields that are numeric, `boolean`, `text`,
+   * `keyword`, and `ip` data types. It is also tolerant of missing values.
+   * Fields that are supported are included in the analysis, other fields are
+   * ignored. Documents where included fields contain an array with two or
+   * more values are also ignored. Documents in the `dest` index that don’t
+   * contain a results field are not included in the classification analysis.
+   * Classification analysis can be improved by mapping ordinal variable
+   * values to a single number. For example, in case of age ranges, you can
+   * model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */
   analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
+  /** A description of the job. */
   description?: string
+  /** The destination configuration. */
   dest: MlDataframeAnalyticsDestination
+  /** The maximum number of threads to be used by the analysis. Using more
+   * threads may decrease the time necessary to complete the analysis at the
+   * cost of using more CPU. Note that the process may use additional threads
+   * for operational functionality other than the analysis itself. */
   max_num_threads?: integer
   _meta?: Metadata
+  /** The approximate maximum amount of memory resources that are permitted for
+   * analytical processing. If your `elasticsearch.yml` file contains an
+   * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try
+   * to create data frame analytics jobs that have `model_memory_limit` values
+   * greater than that setting. */
   model_memory_limit?: string
+  /** The configuration of how to source the analysis data. */
   source: MlDataframeAnalyticsSource
   headers?: HttpHeaders
   version?: VersionString
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never }
 }
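A sketch of creating an outlier detection job with `putDataFrameAnalytics`; the index names and memory limit are illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create an outlier detection job reading from a source index and writing
// results to a destination index.
const created = await client.ml.putDataFrameAnalytics({
  id: 'my-outlier-job',
  source: { index: 'my-source-index' },
  dest: { index: 'my-dest-index' },
  analysis: { outlier_detection: {} },
  model_memory_limit: '50mb'
})
console.log(created.id, created.version)
```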
*/ + body?: string | { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } } export interface MlPutDataFrameAnalyticsResponse { @@ -17186,29 +29163,82 @@ export interface MlPutDataFrameAnalyticsResponse { } export interface MlPutDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id + /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` + * string or when no indices are specified. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If true, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record - /** @alias aggregations */ + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. + * This search is split into time chunks in order to ensure the load on Elasticsearch is managed. + * Chunking configuration controls how the size of these time chunks are calculated; + * it is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. + * The datafeed can optionally search over indices that have already been read in an effort to determine whether + * any data has subsequently been added to the index. If missing data is found, it is a good indication that the + * `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. + * This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. + * The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible + * fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last + * (partial) bucket are written then eventually overwritten by the full bucket results. 
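+ * For example (an illustrative note): with a bucket span of `1h`, a `frequency` of `10m` produces interim results roughly every ten minutes, each later replaced by the full hourly bucket result.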
If the datafeed uses + * aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices - /** @alias indices */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices + /** Specifies index expansion options that are used during search */ indices_options?: IndicesOptions + /** Identifier for the anomaly detection job. */ job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. 
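+ * A hedged sketch of passing an extra URL parameter through this escape hatch (assumes an instantiated `client`; the IDs and index name are hypothetical): + * @example + * await client.ml.putDatafeed({ datafeed_id: 'datafeed-test', job_id: 'test-job', indices: ['server-metrics'], querystring: { human: true } })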
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } } export interface MlPutDatafeedResponse { @@ -17230,9 +29260,17 @@ export interface MlPutDatafeedResponse { } export interface MlPutFilterRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ filter_id: Id + /** A description of the filter. */ description?: string + /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. + * Up to 10000 items are allowed in each filter. */ items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, description?: never, items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, description?: never, items?: never } } export interface MlPutFilterResponse { @@ -17242,26 +29280,52 @@ export interface MlPutFilterResponse { } export interface MlPutJobRequest extends RequestBase { + /** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ job_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ allow_lazy_open?: boolean + /** Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. 
The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. */ background_persist_interval?: Duration + /** Advanced configuration option. Contains custom meta data about the job. */ custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */ data_description: MlDataDescription + /** Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. */ datafeed_config?: MlDatafeedConfig + /** A description of the job. */ description?: string + /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] + /** This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. */ model_plot_config?: MlModelPlotConfig + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. */ renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName + /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result.
If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ results_retention_days?: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } } export interface MlPutJobResponse { @@ -17295,7 +29359,9 @@ export interface MlPutTrainedModelAggregateOutput { } export interface MlPutTrainedModelDefinition { + /** Collection of preprocessors */ preprocessors?: MlPutTrainedModelPreprocessor[] + /** The definition of the trained model. */ trained_model: MlPutTrainedModelTrainedModel } @@ -17329,20 +29395,55 @@ export interface MlPutTrainedModelPreprocessor { } export interface MlPutTrainedModelRequest extends RequestBase { + /** The unique identifier of the trained model. */ model_id: Id + /** If set to `true` and a `compressed_definition` is provided, + * the request defers definition decompression and skips relevant + * validations. */ defer_definition_decompression?: boolean + /** Whether to wait for all child operations (e.g. model download) + * to complete. */ wait_for_completion?: boolean + /** The compressed (GZipped and Base64 encoded) inference definition of the + * model. If compressed_definition is specified, then definition cannot be + * specified. */ compressed_definition?: string + /** The inference definition for the model. If definition is specified, then + * compressed_definition cannot be specified. */ definition?: MlPutTrainedModelDefinition + /** A human-readable description of the inference trained model. */ description?: string + /** The default configuration for inference. This can be either a regression + * or classification configuration. It must match the underlying + * definition.trained_model's target_type. For pre-packaged models such as + * ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. 
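+ * For example (an illustrative value): `{ field_names: ['text_field'] }`.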
*/ input?: MlPutTrainedModelInput + /** An object map that contains metadata about the model. */ metadata?: any + /** The model type. */ model_type?: MlTrainedModelType + /** The estimated memory usage in bytes to keep the trained model in memory. + * This property is supported only if defer_definition_decompression is true + * or the model definition is not supplied. */ model_size_bytes?: long + /** The platform architecture (if applicable) of the trained model. If the model + * only works on one platform, because it is heavily optimized for a particular + * processor architecture and OS combination, then this field specifies which. + * The format of the string must match the platform identifiers used by Elasticsearch, + * so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, + * or `windows-x86_64`. For portable models (those that work independent of processor + * architecture or OS features), leave this field unset. */ platform_architecture?: string + /** An array of tags to organize the model. */ tags?: string[] + /** Optional prefix strings applied at inference */ prefix_strings?: MlTrainedModelPrefixStrings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } } export type MlPutTrainedModelResponse = MlTrainedModelConfig @@ -17355,8 +29456,14 @@ export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { } export interface MlPutTrainedModelTrainedModel { + /** The definition for a binary decision tree. */ tree?: MlPutTrainedModelTrainedModelTree + /** The definition of a node in a tree. + * There are two major types of nodes: leaf nodes and non-leaf nodes. + * - Leaf nodes only need node_index and leaf_value defined. + * - All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined. */ tree_node?: MlPutTrainedModelTrainedModelTreeNode + /** The definition for an ensemble model */ ensemble?: MlPutTrainedModelEnsemble } @@ -17384,44 +29491,90 @@ export interface MlPutTrainedModelWeights { } export interface MlPutTrainedModelAliasRequest extends RequestBase { + /** The alias to create or update. This value cannot end in numbers. */ model_alias: Name + /** The identifier for the trained model that the alias refers to. */ model_id: Id + /** Specifies whether the alias gets reassigned to the specified trained + * model if it is already assigned to a different model. If the alias is + * already assigned and this parameter is false, the API returns an error. */ reassign?: boolean + /** All values in `body` will be added to the request body.
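+ * A minimal call sketch (assumes an instantiated `client`; the identifiers are hypothetical): + * @example + * await client.ml.putTrainedModelAlias({ model_alias: 'flight-delay-model', model_id: 'flight-delay-prediction-1574775339910' })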
*/ + body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } } export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { + /** The unique identifier of the trained model. */ model_id: Id + /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the + * order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ part: integer + /** The definition part for the model. Must be a base64 encoded string. */ definition: string + /** The total uncompressed definition length in bytes. Not base64 encoded. */ total_definition_length: long + /** The total number of parts that will be uploaded. Must be greater than 0. */ total_parts: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } } export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { + /** The unique identifier of the trained model. */ model_id: Id + /** The model vocabulary, which must not be empty. */ vocabulary: string[] + /** The optional model merges if required by the tokenizer. */ merges?: string[] + /** The optional vocabulary value scores if required by the tokenizer. */ scores?: double[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } } export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { + /** The ID of the job to reset. */ job_id: Id + /** Whether this request should wait until the operation has completed before + * returning. */ wait_for_completion?: boolean + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ delete_user_annotations?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } } export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ job_id: Id + /** You can specify `empty` as the snapshot identifier.
Reverting to the empty + * snapshot means the anomaly detection job starts learning a new model from + * scratch when it is started. */ snapshot_id: Id + /** Refer to the description for the `delete_intervening_results` query parameter. */ delete_intervening_results?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } } export interface MlRevertModelSnapshotResponse { @@ -17429,45 +29582,110 @@ export interface MlRevertModelSnapshotResponse { } export interface MlSetUpgradeModeRequest extends RequestBase { + /** When `true`, it enables `upgrade_mode` which temporarily halts all job + * and datafeed tasks and prohibits new job and datafeed tasks from + * starting. */ enabled?: boolean + /** The time to wait for the request to be completed. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } } export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id + /** Controls the amount of time to wait until the data frame analytics job + * starts. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, timeout?: never } } export interface MlStartDataFrameAnalyticsResponse { acknowledged: boolean + /** The ID of the node that the job was started on, or an empty string if the job + * is allowed to open lazily and has not yet been assigned to a node. In serverless, + * if the job has been assigned to run, the node ID will be "serverless". */ node: NodeId } export interface MlStartDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric + * characters. */ datafeed_id: Id + /** Refer to the description for the `end` query parameter. */ end?: DateTime + /** Refer to the description for the `start` query parameter. */ start?: DateTime + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
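+ * A call sketch (assumes an instantiated `client`; the datafeed ID and timestamp are hypothetical): + * @example + * await client.ml.startDatafeed({ datafeed_id: 'datafeed-low_request_rate', start: '2024-04-07T18:22:16Z' })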
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } } export interface MlStartDatafeedResponse { + /** The ID of the node that the job was started on. In serverless this will be "serverless". + * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeIds + /** For a successful response, this value is always `true`. On failure, an exception is returned instead. */ started: boolean } export interface MlStartTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id + /** The inference cache size (in memory outside the JVM heap) per node for the model. + * The default value is the same size as the `model_size_bytes`. To disable the cache, + * `0b` can be provided. */ cache_size?: ByteSize + /** A unique identifier for the deployment of the model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ deployment_id?: string + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer + /** The deployment priority. */ priority?: MlTrainingPriority + /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds + * this value, new requests are rejected with a 429 error. */ queue_capacity?: integer + /** Sets the number of threads used by each model allocation during inference. This generally increases + * the inference speed. The inference process is a compute-bound process; any number + * greater than the number of available hardware threads on the machine does not increase the + * inference speed. If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. */ threads_per_allocation?: integer + /** Specifies the amount of time to wait for the model to deploy. */ timeout?: Duration + /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } + /** All values in `querystring` will be added to the request querystring.
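+ * A call sketch (assumes an instantiated `client`; the model ID is hypothetical): + * @example + * await client.ml.startTrainedModelDeployment({ model_id: 'my-elser-model', wait_for: 'started', number_of_allocations: 1 })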
*/ + querystring?: { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } } export interface MlStartTrainedModelDeploymentResponse { @@ -17475,10 +29693,31 @@ export interface MlStartTrainedModelDeploymentResponse { } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty data_frame_analytics + * array when there are no matches and the subset of results when there are + * partial matches. If this parameter is false, the request returns a 404 + * status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** If true, the data frame analytics job is stopped forcefully. */ force?: boolean + /** Controls the amount of time to wait until the data frame analytics job + * stops. Defaults to 20 seconds. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } } export interface MlStopDataFrameAnalyticsResponse { @@ -17486,10 +29725,20 @@ export interface MlStopDataFrameAnalyticsResponse { } export interface MlStopDatafeedRequest extends RequestBase { + /** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated + * list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as + * the identifier. */ datafeed_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean + /** Refer to the description for the `force` query parameter. */ force?: boolean + /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } } export interface MlStopDatafeedResponse { @@ -17497,9 +29746,20 @@ export interface MlStopDatafeedResponse { } export interface MlStopTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. 
*/ model_id: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; + * contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and + * there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you + * restart the model deployment. */ force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } } export interface MlStopTrainedModelDeploymentResponse { @@ -17507,11 +29767,30 @@ export interface MlStopTrainedModelDeploymentResponse { } export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id + /** A description of the job. */ description?: string + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } } export interface MlUpdateDataFrameAnalyticsResponse { @@ -17530,26 +29809,79 @@ export interface MlUpdateDataFrameAnalyticsResponse { } export interface MlUpdateDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. 
*/ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only + * with low cardinality data. */ aggregations?: Record + /** Datafeeds might search over long time periods, for several months or years. This search is split into time + * chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of + * these time chunks are calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally + * search over indices that have already been read in an effort to determine whether any data has subsequently been + * added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and + * the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time + * datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is + * either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + * span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are + * written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value + * must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. */ indices?: string[] - /** @alias indices */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: string[] + /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also + * changed. 
Therefore, the time required to learn might be long and the understandability of the results is + * unpredictable. If you want to make significant changes to the source data, it is recommended that you + * clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one + * when you are satisfied with the results of the job. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`. */ scroll_size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } } export interface MlUpdateDatafeedResponse { @@ -17571,10 +29903,18 @@ export interface MlUpdateDatafeedResponse { } export interface MlUpdateFilterRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ filter_id: Id + /** The items to add to the filter. */ add_items?: string[] + /** A description for the filter. */ description?: string + /** The items to remove from the filter. */ remove_items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } } export interface MlUpdateFilterResponse { @@ -17584,22 +29924,72 @@ export interface MlUpdateFilterResponse { } export interface MlUpdateJobRequest extends RequestBase { + /** Identifier for the job. */ job_id: Id + /** Advanced configuration option. 
Specifies whether this job can open when + * there is insufficient machine learning node capacity for it to be + * immediately assigned to a node. If `false` and a machine learning node + * with capacity to run the job cannot immediately be found, the open + * anomaly detection jobs API returns an error. However, this is also + * subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this + * option is set to `true`, the open anomaly detection jobs API does not + * return an error and the job waits in the opening state until sufficient + * machine learning node capacity is available. */ allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit + /** Advanced configuration option. The time between each periodic persistence + * of the model. + * The default value is a randomized value between 3 to 4 hours, which + * avoids all jobs persisting at exactly the same time. The smallest allowed + * value is 1 hour. + * For very large models (several GB), persistence could take 10-20 minutes, + * so do not set the value too low. + * If the job is open when you make the update, you must stop the datafeed, + * close the job, then reopen the job and restart the datafeed for the + * changes to take effect. */ background_persist_interval?: Duration + /** Advanced configuration option. Contains custom meta data about the job. + * For example, it can contain custom URL information as shown in Adding + * custom URLs to machine learning results. */ custom_settings?: Record categorization_filters?: string[] + /** A description of the job. */ description?: string model_plot_config?: MlModelPlotConfig model_prune_window?: Duration + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies a period of time (in days) + * after which only the first snapshot per day is retained. This period is + * relative to the timestamp of the most recent snapshot for this job. Valid + * values range from 0 to `model_snapshot_retention_days`. For jobs created + * before version 7.8.0, the default value matches + * `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies the maximum period of time (in + * days) that snapshots are retained. This period is relative to the + * timestamp of the most recent snapshot for this job. */ model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the + * score are applied, as new data is seen. */ renormalization_window_days?: long + /** Advanced configuration option. The period of time (in days) that results + * are retained. Age is calculated relative to the timestamp of the latest + * bucket result. If this property has a non-null value, once per day at + * 00:30 (server time), results that are the specified number of days older + * than the latest bucket result are deleted from Elasticsearch. The default + * value is null, which means all results are retained. */ results_retention_days?: long + /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] + /** An array of detector update objects. */ detectors?: MlDetectorUpdate[] + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** All values in `body` will be added to the request body. 
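+ * An update sketch (assumes an instantiated `client`; the job ID and values are illustrative): + * @example + * await client.ml.updateJob({ job_id: 'low_request_rate', description: 'Detect unusually low request rates', results_retention_days: 60 })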
*/ + body?: string | { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } } export interface MlUpdateJobResponse { @@ -17627,10 +30017,20 @@ export interface MlUpdateJobResponse { } export interface MlUpdateModelSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ job_id: Id + /** Identifier for the model snapshot. */ snapshot_id: Id + /** A description of the model snapshot. */ description?: string + /** If `true`, this snapshot will not be deleted during automatic cleanup of + * snapshots older than `model_snapshot_retention_days`. However, this + * snapshot will be deleted when the job is deleted. */ retain?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } } export interface MlUpdateModelSnapshotResponse { @@ -17639,9 +30039,24 @@ export interface MlUpdateModelSnapshotResponse { } export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } + /** All values in `querystring` will be added to the request querystring. 
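+ * A scaling sketch (assumes an instantiated `client`; the model ID is hypothetical): + * @example + * await client.ml.updateTrainedModelDeployment({ model_id: 'my-elser-model', number_of_allocations: 2 })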
*/ + querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } } export interface MlUpdateTrainedModelDeploymentResponse { @@ -17649,14 +30064,25 @@ export interface MlUpdateTrainedModelDeploymentResponse { } export interface MlUpgradeJobSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id + /** When true, the API won’t respond until the upgrade is complete. + * Otherwise, it responds as soon as the upgrade task is assigned to a node. */ wait_for_completion?: boolean + /** Controls the time to wait for the request to complete. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } } export interface MlUpgradeJobSnapshotResponse { + /** The ID of the node that the upgrade task was started on if it is still running. In serverless this will be "serverless". */ node: NodeId + /** When true, this means the task is complete. When false, it is still running. */ completed: boolean } @@ -17670,86 +30096,146 @@ export interface MlValidateRequest extends RequestBase { model_snapshot_id?: Id model_snapshot_retention_days?: long results_index_name?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } } export type MlValidateResponse = AcknowledgedResponseBase export interface MlValidateDetectorRequest extends RequestBase { detector?: MlDetector + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { detector?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { detector?: never } } export type MlValidateDetectorResponse = AcknowledgedResponseBase export interface MonitoringBulkRequest extends RequestBase { + /** Default document type for items which don't provide one */ type?: string + /** Identifier of the monitored system */ system_id: string + /** API version of the monitored system */ system_api_version: string + /** Collection interval (e.g., '10s' or '10000ms') of the payload */ interval: Duration operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } } export interface MonitoringBulkResponse { error?: ErrorCause + /** True if there was an error */ errors: boolean + /** True if collection was disabled. */ ignored: boolean took: long } export interface NodesAdaptiveSelection { + /** The exponentially weighted moving average queue size of search requests on the keyed node. */ avg_queue_size?: long + /** The exponentially weighted moving average response time of search requests on the keyed node. */ avg_response_time?: Duration + /** The exponentially weighted moving average response time, in nanoseconds, of search requests on the keyed node. */ avg_response_time_ns?: long + /** The exponentially weighted moving average service time of search requests on the keyed node. */ avg_service_time?: Duration + /** The exponentially weighted moving average service time, in nanoseconds, of search requests on the keyed node. */ avg_service_time_ns?: long + /** The number of outstanding search requests to the keyed node from the node these stats are for. */ outgoing_searches?: long + /** The rank of this node; used for shard selection when routing search requests. */ rank?: string } export interface NodesBreaker { + /** Estimated memory used for the operation. */ estimated_size?: string + /** Estimated memory used, in bytes, for the operation. */ estimated_size_in_bytes?: long + /** Memory limit for the circuit breaker. */ limit_size?: string + /** Memory limit, in bytes, for the circuit breaker. */ limit_size_in_bytes?: long + /** A constant that all estimates for the circuit breaker are multiplied with to calculate a final estimate. */ overhead?: float + /** Total number of times the circuit breaker has been triggered and prevented an out of memory error. */ tripped?: float } export interface NodesCgroup { + /** Contains statistics about the `cpuacct` control group for the node. */ cpuacct?: NodesCpuAcct + /** Contains statistics about the `cpu` control group for the node. */ cpu?: NodesCgroupCpu + /** Contains statistics about the memory control group for the node. */ memory?: NodesCgroupMemory } export interface NodesCgroupCpu { + /** The `cpu` control group to which the Elasticsearch process belongs. */ control_group?: string + /** The period of time, in microseconds, for how regularly all tasks in the same cgroup as the Elasticsearch process should have their access to CPU resources reallocated. */ cfs_period_micros?: integer + /** The total amount of time, in microseconds, for which all tasks in the same cgroup as the Elasticsearch process can run during one period `cfs_period_micros`. */ cfs_quota_micros?: integer + /** Contains CPU statistics for the node. */ stat?: NodesCgroupCpuStat } export interface NodesCgroupCpuStat { + /** The number of reporting periods (as specified by `cfs_period_micros`) that have elapsed. */ number_of_elapsed_periods?: long + /** The number of times all tasks in the same cgroup as the Elasticsearch process have been throttled. */ number_of_times_throttled?: long + /** The total amount of time, in nanoseconds, for which all tasks in the same cgroup as the Elasticsearch process have been throttled. */ time_throttled_nanos?: DurationValue } export interface NodesCgroupMemory { + /** The `memory` control group to which the Elasticsearch process belongs.
*/ control_group?: string + /** The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process. + * This value can be too big to store in a `long`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. + * Any value that is too large to parse into a `long` almost certainly means no limit has been set for the cgroup. */ limit_in_bytes?: string + /** The total current memory usage, in bytes, by all tasks in the same cgroup as the Elasticsearch process. + * This value is stored as a string for consistency with `limit_in_bytes`. */ usage_in_bytes?: string } export interface NodesClient { + /** Unique ID for the HTTP client. */ id?: long + /** Reported agent for the HTTP client. + * If unavailable, this property is not included in the response. */ agent?: string + /** Local address for the HTTP connection. */ local_address?: string + /** Remote address for the HTTP connection. */ remote_address?: string + /** The URI of the client’s most recent request. */ last_uri?: string + /** Time at which the client opened the connection. */ opened_time_millis?: long + /** Time at which the client closed the connection if the connection is closed. */ closed_time_millis?: long + /** Time of the most recent request from this client. */ last_request_time_millis?: long + /** Number of requests from this client. */ request_count?: long + /** Cumulative size in bytes of all requests from this client. */ request_size_bytes?: long + /** Value from the client’s `x-opaque-id` HTTP header. + * If unavailable, this property is not included in the response. */ x_opaque_id?: string } @@ -17758,26 +30244,48 @@ export interface NodesClusterAppliedStats { } export interface NodesClusterStateQueue { + /** Total number of cluster states in queue. */ total?: long + /** Number of pending cluster states in queue. */ pending?: long + /** Number of committed cluster states in queue. */ committed?: long } export interface NodesClusterStateUpdate { + /** The number of cluster state update attempts that did not change the cluster state since the node started. */ count: long + /** The cumulative amount of time spent computing no-op cluster state updates since the node started. */ computation_time?: Duration + /** The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ computation_time_millis?: DurationValue + /** The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time?: Duration + /** The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`.
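+ *
+ * A sketch of reading these master-timing statistics (assumes a connected `client`; the
+ * record is keyed by activity, and 'success' below is an assumed key; only master-eligible
+ * nodes report `cluster_state_update`):
+ *
+ *   const stats = await client.nodes.stats({ metric: 'discovery' })
+ *   for (const node of Object.values(stats.nodes)) {
+ *     console.log(node.discovery?.cluster_state_update?.success?.publication_time_millis)
+ *   }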
*/ publication_time_millis?: DurationValue + /** The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. + * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time?: Duration + /** The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded. + * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time_millis?: DurationValue + /** The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time_millis?: DurationValue + /** The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time_millis?: DurationValue + /** The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time?: Duration + /** The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time_millis?: DurationValue + /** The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ notification_time?: Duration + /** The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ notification_time_millis?: DurationValue } @@ -17800,12 +30308,16 @@ export interface NodesCpu { } export interface NodesCpuAcct { + /** The `cpuacct` control group to which the Elasticsearch process belongs. */ control_group?: string + /** The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process. */ usage_nanos?: DurationValue } export interface NodesDataPathStats { + /** Total amount of disk space available to this Java virtual machine on this file store. */ available?: string + /** Total number of bytes available to this Java virtual machine on this file store.
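+ *
+ * A brief sketch of reading per-path disk headroom from these fields (assumes a connected
+ * `client`):
+ *
+ *   const stats = await client.nodes.stats({ metric: 'fs' })
+ *   for (const node of Object.values(stats.nodes)) {
+ *     for (const p of node.fs?.data ?? []) {
+ *       console.log(p.path, p.available_in_bytes, 'of', p.total_in_bytes, 'bytes available')
+ *     }
+ *   }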
*/ available_in_bytes?: long disk_queue?: string disk_reads?: long @@ -17814,58 +30326,98 @@ export interface NodesDataPathStats { disk_writes?: long disk_write_size?: string disk_write_size_in_bytes?: long + /** Total amount of unallocated disk space in the file store. */ free?: string + /** Total number of unallocated bytes in the file store. */ free_in_bytes?: long + /** Mount point of the file store (for example: `/dev/sda2`). */ mount?: string + /** Path to the file store. */ path?: string + /** Total size of the file store. */ total?: string + /** Total size of the file store in bytes. */ total_in_bytes?: long + /** Type of the file store (ex: ext4). */ type?: string } export interface NodesDiscovery { + /** Contains statistics for the cluster state queue of the node. */ cluster_state_queue?: NodesClusterStateQueue + /** Contains statistics for the published cluster states of the node. */ published_cluster_states?: NodesPublishedClusterStates + /** Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. + * Omitted if the node is not master-eligible. + * Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. + * The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ cluster_state_update?: Record serialized_cluster_states?: NodesSerializedClusterState cluster_applier_stats?: NodesClusterAppliedStats } export interface NodesExtendedMemoryStats extends NodesMemoryStats { + /** Percentage of free memory. */ free_percent?: integer + /** Percentage of used memory. */ used_percent?: integer } export interface NodesFileSystem { + /** List of all file stores. */ data?: NodesDataPathStats[] + /** Last time the file stores statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long + /** Contains statistics for all file stores of the node. */ total?: NodesFileSystemTotal + /** Contains I/O statistics for the node. */ io_stats?: NodesIoStats } export interface NodesFileSystemTotal { + /** Total disk space available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available?: string + /** Total number of bytes available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available_in_bytes?: long + /** Total unallocated disk space in all file stores. */ free?: string + /** Total number of unallocated bytes in all file stores. */ free_in_bytes?: long + /** Total size of all file stores. */ total?: string + /** Total size of all file stores in bytes. */ total_in_bytes?: long } export interface NodesGarbageCollector { + /** Contains statistics about JVM garbage collectors for the node. */ collectors?: Record } export interface NodesGarbageCollectorTotal { + /** Total number of JVM garbage collectors that collect objects. */ collection_count?: long + /** Total time spent by JVM collecting objects. */ collection_time?: string + /** Total time, in milliseconds, spent by JVM collecting objects. 
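+ *
+ * A hedged sketch of reading per-collector GC totals (assumes a connected `client`; collector
+ * names such as 'young' and 'old' depend on the JVM in use):
+ *
+ *   const stats = await client.nodes.stats({ metric: 'jvm' })
+ *   for (const node of Object.values(stats.nodes)) {
+ *     for (const [name, gc] of Object.entries(node.jvm?.gc?.collectors ?? {})) {
+ *       console.log(name, gc.collection_count, gc.collection_time_in_millis)
+ *     }
+ *   }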
*/ collection_time_in_millis?: long } export interface NodesHttp { + /** Current number of open HTTP connections for the node. */ current_open?: integer + /** Total number of HTTP connections opened for the node. */ total_opened?: long + /** Information on current and recently-closed HTTP client connections. + * Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. */ clients?: NodesClient[] + /** Detailed HTTP stats broken down by route + * @remarks This property is not supported on Elastic Cloud Serverless. */ routes: Record } @@ -17888,82 +30440,138 @@ export interface NodesHttpRouteResponses { } export interface NodesIndexingPressure { + /** Contains statistics for memory consumption from indexing load. */ memory?: NodesIndexingPressureMemory } export interface NodesIndexingPressureMemory { + /** Configured memory limit for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit?: ByteSize + /** Configured memory limit, in bytes, for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit_in_bytes?: long + /** Contains statistics for current indexing load. */ current?: NodesPressureMemory + /** Contains statistics for the cumulative indexing load since the node started. */ total?: NodesPressureMemory } export interface NodesIngest { + /** Contains statistics about ingest pipelines for the node. */ pipelines?: Record + /** Contains statistics about ingest operations for the node. */ total?: NodesIngestTotal } export interface NodesIngestStats { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total number of ingest processors. */ processors: Record[] + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue + /** Total number of bytes of all documents ingested by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ ingested_as_first_pipeline_in_bytes: long + /** Total number of bytes of all documents produced by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. + * In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. */ produced_as_first_pipeline_in_bytes: long } export interface NodesIngestTotal { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue } export interface NodesIoStatDevice { + /** The Linux device name. 
*/ device_name?: string + /** The total number of read and write operations for the device completed since starting Elasticsearch. */ operations?: long + /** The total number of kilobytes read for the device since starting Elasticsearch. */ read_kilobytes?: long + /** The total number of read operations for the device completed since starting Elasticsearch. */ read_operations?: long + /** The total number of kilobytes written for the device since starting Elasticsearch. */ write_kilobytes?: long + /** The total number of write operations for the device completed since starting Elasticsearch. */ write_operations?: long } export interface NodesIoStats { + /** Array of disk metrics for each device that is backing an Elasticsearch data path. + * These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ devices?: NodesIoStatDevice[] + /** The sum of the disk metrics for all devices that back an Elasticsearch data path. */ total?: NodesIoStatDevice } export interface NodesJvm { + /** Contains statistics about JVM buffer pools for the node. */ buffer_pools?: Record + /** Contains statistics about classes loaded by JVM for the node. */ classes?: NodesJvmClasses + /** Contains statistics about JVM garbage collectors for the node. */ gc?: NodesGarbageCollector + /** Contains JVM memory usage statistics for the node. */ mem?: NodesJvmMemoryStats + /** Contains statistics about JVM thread usage for the node. */ threads?: NodesJvmThreads + /** Last time JVM statistics were refreshed. */ timestamp?: long + /** Human-readable JVM uptime. + * Only returned if the `human` query parameter is `true`. */ uptime?: string + /** JVM uptime in milliseconds. */ uptime_in_millis?: long } export interface NodesJvmClasses { + /** Number of classes currently loaded by JVM. */ current_loaded_count?: long + /** Total number of classes loaded since the JVM started. */ total_loaded_count?: long + /** Total number of classes unloaded since the JVM started. */ total_unloaded_count?: long } export interface NodesJvmMemoryStats { + /** Memory, in bytes, currently in use by the heap. */ heap_used_in_bytes?: long + /** Percentage of memory currently in use by the heap. */ heap_used_percent?: long + /** Amount of memory, in bytes, available for use by the heap. */ heap_committed_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes?: long + /** Maximum amount of memory available for use by the heap. */ heap_max?: ByteSize + /** Non-heap memory used, in bytes. */ non_heap_used_in_bytes?: long + /** Amount of non-heap memory available, in bytes. */ non_heap_committed_in_bytes?: long + /** Contains statistics about heap memory usage for the node. */ pools?: Record } export interface NodesJvmThreads { + /** Number of active threads in use by JVM. */ count?: long + /** Highest number of threads used by JVM. */ peak_count?: long } @@ -17973,6 +30581,8 @@ export interface NodesKeyedProcessor { } export interface NodesMemoryStats { + /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property then this reports the overridden value in bytes. + * Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes?: long resident?: string resident_in_bytes?: long @@ -17980,16 +30590,24 @@ export interface NodesMemoryStats { share_in_bytes?: long total_virtual?: string total_virtual_in_bytes?: long + /** Total amount of physical memory in bytes.
*/ total_in_bytes?: long + /** Amount of free physical memory in bytes. */ free_in_bytes?: long + /** Amount of used physical memory in bytes. */ used_in_bytes?: long } export interface NodesNodeBufferPool { + /** Number of buffer pools. */ count?: long + /** Total capacity of buffer pools. */ total_capacity?: string + /** Total capacity of buffer pools in bytes. */ total_capacity_in_bytes?: long + /** Size of buffer pools. */ used?: string + /** Size of buffer pools in bytes. */ used_in_bytes?: long } @@ -17999,6 +30617,7 @@ export interface NodesNodeReloadResult { } export interface NodesNodesResponseBase { + /** Contains statistics about the number of nodes selected by the request’s node filters. */ _nodes?: NodeStatistics } @@ -18011,48 +30630,80 @@ export interface NodesOperatingSystem { } export interface NodesPool { + /** Memory, in bytes, used by the heap. */ used_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ max_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_used_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_max_in_bytes?: long } export interface NodesPressureMemory { + /** Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ all?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ all_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating stage. */ coordinating?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating stage. */ coordinating_in_bytes?: long + /** Memory consumed by indexing requests in the primary stage. */ primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the primary stage. */ primary_in_bytes?: long + /** Memory consumed by indexing requests in the replica stage. */ replica?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the replica stage. */ replica_in_bytes?: long + /** Number of indexing requests rejected in the coordinating stage. */ coordinating_rejections?: long + /** Number of indexing requests rejected in the primary stage. */ primary_rejections?: long + /** Number of indexing requests rejected in the replica stage. */ replica_rejections?: long primary_document_rejections?: long large_operation_rejections?: long } export interface NodesProcess { + /** Contains CPU statistics for the node. */ cpu?: NodesCpu + /** Contains virtual memory statistics for the node. */ mem?: NodesMemoryStats + /** Number of opened file descriptors associated with the current process, or `-1` if not supported. */ open_file_descriptors?: integer + /** Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ max_file_descriptors?: integer + /** Last time the statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch.
*/ timestamp?: long } export interface NodesProcessor { + /** Number of documents transformed by the processor. */ count?: long + /** Number of documents currently being transformed by the processor. */ current?: long + /** Number of failed operations for the processor. */ failed?: long + /** Time, in milliseconds, spent by the processor transforming documents. */ time_in_millis?: DurationValue } export interface NodesPublishedClusterStates { + /** Number of published cluster states. */ full_states?: long + /** Number of incompatible differences between published cluster states. */ incompatible_diffs?: long + /** Number of compatible differences between published cluster states. */ compatible_diffs?: long } @@ -18065,52 +30716,89 @@ export interface NodesRecording { } export interface NodesRepositoryLocation { base_path: string + /** Container name (Azure) */ container?: string + /** Bucket name (GCP, S3) */ bucket?: string } export interface NodesRepositoryMeteringInformation { + /** Repository name. */ repository_name: Name + /** Repository type. */ repository_type: string + /** Represents a unique location within the repository. */ repository_location: NodesRepositoryLocation + /** An identifier that changes every time the repository is updated. */ repository_ephemeral_id: Id + /** Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ repository_started_at: EpochTime + /** Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ repository_stopped_at?: EpochTime + /** A flag that tells whether or not this object has been archived. When a repository is closed or updated the + * repository metering information is archived and kept for a certain period of time. This allows retrieving the + * repository metering information of previous repository instantiations. */ archived: boolean + /** The cluster state version when this object was archived; this field can be used as a logical timestamp to delete + * all the archived metrics up to an observed version. This field is only present for archived repository metering + * information objects. The main purpose of this field is to avoid possible race conditions during repository metering + * information deletions, i.e. deleting archived repositories metering information that we haven’t observed yet. */ cluster_version?: VersionNumber + /** An object with the number of requests performed against the repository grouped by request type. */ request_counts: NodesRequestCounts } export interface NodesRequestCounts { + /** Number of Get Blob Properties requests (Azure) */ GetBlobProperties?: long + /** Number of Get Blob requests (Azure) */ GetBlob?: long + /** Number of List Blobs requests (Azure) */ ListBlobs?: long + /** Number of Put Blob requests (Azure) */ PutBlob?: long + /** Number of Put Block requests (Azure) */ PutBlock?: long + /** Number of Put Block List requests */ PutBlockList?: long + /** Number of get object requests (GCP, S3) */ GetObject?: long + /** Number of list objects requests (GCP, S3) */ ListObjects?: long + /** Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads + * can perform multiple http requests to insert a single object but they are considered as a single request + * since they are billed as an individual operation.
(GCP) */ InsertObject?: long + /** Number of PutObject requests (S3) */ PutObject?: long + /** Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ PutMultipartObject?: long } export interface NodesScriptCache { + /** Total number of times the script cache has evicted old data. */ cache_evictions?: long + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long context?: string } export interface NodesScripting { + /** Total number of times the script cache has evicted old data. */ cache_evictions?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long + /** Contains the recent history of script compilations. */ compilations_history?: Record + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long contexts?: NodesContext[] } export interface NodesSerializedClusterState { + /** Number of published cluster states. */ full_states?: NodesSerializedClusterStateDetail diffs?: NodesSerializedClusterStateDetail } @@ -18130,36 +30818,63 @@ export interface NodesSizeHttpHistogram { } export interface NodesStats { + /** Statistics about adaptive replica selection. */ adaptive_selection?: Record + /** Statistics about the field data circuit breaker. */ breakers?: Record + /** File system information, data path, free disk space, read/write stats. */ fs?: NodesFileSystem + /** Network host for the node, based on the network host setting. */ host?: Host + /** HTTP connection information. */ http?: NodesHttp + /** Statistics about ingest preprocessing. */ ingest?: NodesIngest + /** IP address and port for the node. */ ip?: Ip | Ip[] + /** JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ jvm?: NodesJvm + /** Human-readable identifier for the node. + * Based on the node name setting. */ name?: Name + /** Operating system stats, load average, mem, swap. */ os?: NodesOperatingSystem + /** Process statistics, memory consumption, cpu usage, open file descriptors. */ process?: NodesProcess + /** Roles assigned to the node. */ roles?: NodeRoles + /** Contains script statistics for the node. */ script?: NodesScripting script_cache?: Record + /** Statistics about each thread pool, including current size, queue and rejected tasks. */ thread_pool?: Record timestamp?: long + /** Transport statistics about sent and received bytes in cluster communication. */ transport?: NodesTransport + /** Host and port for the transport layer, used for internal communication between nodes in a cluster. */ transport_address?: TransportAddress + /** Contains a list of attributes for the node. */ attributes?: Record + /** Contains node discovery statistics for the node. */ discovery?: NodesDiscovery + /** Contains indexing pressure statistics for the node. */ indexing_pressure?: NodesIndexingPressure + /** Indices stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. */ indices?: IndicesStatsShardStats } export interface NodesThreadCount { + /** Number of active threads in the thread pool. */ active?: long + /** Number of tasks completed by the thread pool executor. */ completed?: long + /** Highest number of active threads in the thread pool.
*/ largest?: long + /** Number of tasks in queue for the thread pool. */ queue?: long + /** Number of tasks rejected by the thread pool executor. */ rejected?: long + /** Number of threads in the thread pool. */ threads?: long } @@ -18170,56 +30885,102 @@ export interface NodesTimeHttpHistogram { } export interface NodesTransport { + /** The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. */ inbound_handling_time_histogram?: NodesTransportHistogram[] + /** The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram. */ outbound_handling_time_histogram?: NodesTransportHistogram[] + /** Total number of RX (receive) packets received by the node during internal cluster communication. */ rx_count?: long + /** Size of RX packets received by the node during internal cluster communication. */ rx_size?: string + /** Size, in bytes, of RX packets received by the node during internal cluster communication. */ rx_size_in_bytes?: long + /** Current number of inbound TCP connections used for internal communication between nodes. */ server_open?: integer + /** Total number of TX (transmit) packets sent by the node during internal cluster communication. */ tx_count?: long + /** Size of TX packets sent by the node during internal cluster communication. */ tx_size?: string + /** Size, in bytes, of TX packets sent by the node during internal cluster communication. */ tx_size_in_bytes?: long + /** The cumulative number of outbound transport connections that this node has opened since it started. + * Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. + * Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ total_outbound_connections?: long } export interface NodesTransportHistogram { + /** The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ count?: long + /** The exclusive upper bound of the bucket in milliseconds. + * May be omitted on the last bucket if this bucket has no upper bound. */ lt_millis?: long + /** The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. */ ge_millis?: long } export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds + /** Specifies the maximum `archive_version` to be cleared from the archive. */ max_archive_version: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, max_archive_version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, max_archive_version?: never } } export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. 
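+ *
+ * A minimal sketch of the companion read API (hypothetical node ID; assumes a connected
+ * `client`):
+ *
+ *   const info = await client.nodes.getRepositoriesMeteringInfo({ node_id: 'my-node-id' })
+ *   console.log(info.cluster_name, Object.keys(info.nodes))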
*/ nodes: Record } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. + * For more information about the nodes selective options, refer to the node specification documentation. */ node_id: NodeIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never } } export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record } export interface NodesHotThreadsRequest extends RequestBase { + /** List of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** If true, known idle threads (e.g. waiting in a socket select, or to get + * a task from an empty queue) are filtered out. */ ignore_idle_threads?: boolean + /** The interval to do the second sampling of threads. */ interval?: Duration + /** Number of samples of thread stacktrace. */ snapshots?: long + /** Specifies the number of hot threads to provide information for. */ threads?: long + /** Period to wait for a response. If no response is received + * before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The type to sample. */ type?: ThreadType + /** The sort order for 'cpu' type (default: total) */ sort?: ThreadType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } } export interface NodesHotThreadsResponse { @@ -18232,14 +30993,18 @@ export interface NodesInfoDeprecationIndexing { export interface NodesInfoNodeInfo { attributes: Record build_flavor: string + /** Short hash of the last git commit in this release. */ build_hash: string build_type: string component_versions: Record + /** The node’s host name. */ host: Host http?: NodesInfoNodeInfoHttp index_version: VersionNumber + /** The node’s IP address. */ ip: Ip jvm?: NodesInfoNodeJvmInfo + /** The node's name */ name: Name os?: NodesInfoNodeOperatingSystemInfo plugins?: PluginStats[] @@ -18247,11 +31012,15 @@ export interface NodesInfoNodeInfo { roles: NodeRoles settings?: NodesInfoNodeInfoSettings thread_pool?: Record + /** Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings. */ total_indexing_buffer?: long + /** Same as total_indexing_buffer, but expressed in bytes. */ total_indexing_buffer_in_bytes?: ByteSize transport?: NodesInfoNodeInfoTransport + /** Host and port where transport HTTP connections are accepted. 
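+ *
+ * A short sketch of listing the addresses reported here (assumes a connected `client`):
+ *
+ *   const info = await client.nodes.info()
+ *   for (const [id, node] of Object.entries(info.nodes)) {
+ *     console.log(id, node.name, node.version, node.transport_address)
+ *   }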
*/ transport_address: TransportAddress transport_version: VersionNumber + /** Elasticsearch version running on this node. */ version: VersionString modules?: PluginStats[] ingest?: NodesInfoNodeInfoIngest @@ -18455,6 +31224,7 @@ export interface NodesInfoNodeInfoSettingsTransport { type: NodesInfoNodeInfoSettingsTransportType | string 'type.default'?: string features?: NodesInfoNodeInfoSettingsTransportFeatures + /** Only used in unit tests */ ignore_deserialization_errors?: SpecUtilsStringified } @@ -18538,12 +31308,18 @@ export interface NodesInfoNodeJvmInfo { } export interface NodesInfoNodeOperatingSystemInfo { + /** Name of the JVM architecture (ex: amd64, x86) */ arch: string + /** Number of processors available to the Java virtual machine */ available_processors: integer + /** The number of processors actually used to calculate thread pool size. This number can be set with the node.processors setting of a node and defaults to the number of processors reported by the OS. */ allocated_processors?: integer + /** Name of the operating system (ex: Linux, Windows, Mac OS X) */ name: Name pretty_name: Name + /** Refresh interval for the OS statistics */ refresh_interval_in_millis: DurationValue + /** Version of the operating system */ version: VersionString cpu?: NodesInfoNodeInfoOSCPU mem?: NodesInfoNodeInfoMemory @@ -18551,8 +31327,11 @@ export interface NodesInfoNodeOperatingSystemInfo { } export interface NodesInfoNodeProcessInfo { + /** Process identifier (PID) */ id: long + /** Indicates if the process address space has been successfully locked in memory */ mlockall: boolean + /** Refresh interval for the process statistics */ refresh_interval_in_millis: DurationValue } @@ -18571,10 +31350,18 @@ export interface NodesInfoRemoveClusterServer { } export interface NodesInfoRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ metric?: Metrics + /** If true, returns settings in flat format. */ flat_settings?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } } export type NodesInfoResponse = NodesInfoResponseBase @@ -18585,9 +31372,17 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase { } export interface NodesReloadSecureSettingsRequest extends RequestBase { + /** The names of particular nodes in the cluster to target. */ node_id?: NodeIds + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The password for the Elasticsearch keystore. */ secure_settings_password?: Password + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } + /** All values in `querystring` will be added to the request querystring. 
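+ *
+ * A hedged sketch (assumes a connected `client` and a password-protected keystore; the
+ * password shown is hypothetical):
+ *
+ *   await client.nodes.reloadSecureSettings({
+ *     secure_settings_password: 'keystore-password'
+ *   })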
*/ + querystring?: { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } } export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase @@ -18598,18 +31393,34 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons } export interface NodesStatsRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds + /** Limit the information returned to the specified metrics */ metric?: Metrics + /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ index_metric?: Metrics + /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields + /** Comma-separated list of search groups to include in the search statistics. */ groups?: boolean + /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ include_segment_file_sizes?: boolean + /** Indicates whether statistics are aggregated at the cluster, index, or shard level. */ level?: Level + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** A comma-separated list of document types for the indexing index metric. */ types?: string[] + /** If `true`, the response includes information from segments that are not loaded into memory. */ include_unloaded_segments?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } } export type NodesStatsResponse = NodesStatsResponseBase @@ -18627,9 +31438,18 @@ export interface NodesUsageNodeUsage { } export interface NodesUsageRequest extends RequestBase { + /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ node_id?: NodeIds + /** Limits the information returned to the specific metrics. + * A comma-separated list of the following options: `_all`, `rest_actions`. */ metric?: Metrics + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. 
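+ *
+ * A minimal sketch (assumes a connected `client`):
+ *
+ *   const usage = await client.nodes.usage({ metric: 'rest_actions' })
+ *   console.log(Object.keys(usage.nodes))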
*/ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } } export type NodesUsageResponse = NodesUsageResponseBase @@ -18640,21 +31460,56 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { } export interface QueryRulesQueryRule { + /** A unique identifier for the rule. */ rule_id: Id + /** The type of rule. + * `pinned` will identify and pin specific documents to the top of search results. + * `exclude` will exclude specific documents from search results. */ type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer } export interface QueryRulesQueryRuleActions { + /** The unique document IDs of the documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. */ ids?: Id[] + /** The documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. + * There is a maximum value of 100 documents in a rule. + * You can specify the following attributes for each document: + * + * * `_index`: The index of the document to pin. + * * `_id`: The unique document ID. */ docs?: QueryDslPinnedDoc[] } export interface QueryRulesQueryRuleCriteria { + /** The type of criteria. The following criteria types are supported: + * + * * `always`: Matches all queries, regardless of input. + * * `contains`: Matches that contain this value anywhere in the field meet the criteria defined by the rule. Only applicable for string values. + * * `exact`: Only exact matches meet the criteria defined by the rule. Applicable for string or numerical values. + * * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit Distance meet the criteria defined by the rule. Only applicable for string values. + * * `gt`: Matches with a value greater than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `gte`: Matches with a value greater than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lt`: Matches with a value less than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lte`: Matches with a value less than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `prefix`: Matches that start with this value meet the criteria defined by the rule. Only applicable for string values. + * * `suffix`: Matches that end with this value meet the criteria defined by the rule. Only applicable for string values. */ type: QueryRulesQueryRuleCriteriaType + /** The metadata field to match against. + * This metadata will be used to match against `match_criteria` sent in the rule. + * It is required for all criteria types except `always`. */ metadata?: string + /** The values to match against the `metadata` field. + * Only one value must match for the criteria to be met. + * It is required for all criteria types except `always`. 
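+ *
+ * For example (hypothetical metadata key and value), an `exact` criterion that matches when
+ * the incoming `user_query` metadata equals 'pugs':
+ *
+ *   const criteria = { type: 'exact', metadata: 'user_query', values: ['pugs'] }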
*/ values?: any[] } @@ -18663,46 +31518,82 @@ export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** Rules associated with the query ruleset. */ rules: QueryRulesQueryRule[] } export interface QueryRulesDeleteRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to delete */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to delete */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } } export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase export interface QueryRulesDeleteRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset to delete */ ruleset_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } } export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase export interface QueryRulesGetRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to retrieve */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to retrieve */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } } export type QueryRulesGetRuleResponse = QueryRulesQueryRule export interface QueryRulesGetRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset */ ruleset_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } } export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset export interface QueryRulesListRulesetsQueryRulesetListItem { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** The number of rules associated with the ruleset. */ rule_total_count: integer + /** A map of criteria type (for example, `exact`) to the number of rules of that type. + * + * NOTE: The counts in `rule_criteria_types_counts` may be larger than the value of `rule_total_count` because a rule may have multiple criteria. */ rule_criteria_types_counts: Record + /** A map of rule type (for example, `pinned`) to the number of rules of that type. */ rule_type_counts: Record } export interface QueryRulesListRulesetsRequest extends RequestBase { + /** The offset from the first result to fetch. */ from?: integer + /** The maximum number of results to retrieve. */ size?: integer + /** All values in `body` will be added to the request body. 
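+ *
+ * A paging sketch (assumes a connected `client` on a deployment that includes the query
+ * rules APIs):
+ *
+ *   const page = await client.queryRules.listRulesets({ from: 0, size: 20 })
+ *   console.log(page.count, page.results.map(r => r.ruleset_id))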
*/ + body?: string | { [key: string]: any } & { from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never } } export interface QueryRulesListRulesetsResponse { @@ -18711,12 +31602,23 @@ export interface QueryRulesListRulesetsResponse { } export interface QueryRulesPutRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to be created or updated. */ ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ rule_id: Id + /** The type of rule. */ type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } } export interface QueryRulesPutRuleResponse { @@ -18724,8 +31626,13 @@ export interface QueryRulesPutRuleResponse { } export interface QueryRulesPutRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset to be created or updated. */ ruleset_id: Id rules: QueryRulesQueryRule | QueryRulesQueryRule[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rules?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rules?: never } } export interface QueryRulesPutRulesetResponse { @@ -18733,13 +31640,22 @@ export interface QueryRulesPutRulesetResponse { } export interface QueryRulesTestQueryRulesetMatchedRule { + /** Ruleset unique identifier */ ruleset_id: Id + /** Rule unique identifier within that ruleset */ rule_id: Id } export interface QueryRulesTestRequest extends RequestBase { + /** The unique identifier of the query ruleset to be created or updated */ ruleset_id: Id + /** The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ match_criteria: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } } export interface QueryRulesTestResponse { @@ -18748,39 +31664,72 @@ export interface QueryRulesTestResponse { } export interface RollupDateHistogramGrouping { + /** How long to wait before rolling up new documents. + * By default, the indexer attempts to roll up all data that is available. + * However, it is not uncommon for data to arrive out of order. 
+ * The indexer is unable to deal with data that arrives after a time-span has been rolled up. + * You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ delay?: Duration + /** The date field that is to be rolled up. */ field: Field format?: string interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ calendar_interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ fixed_interval?: Duration + /** Defines what `time_zone` the rollup documents are stored as. + * Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. + * By default, rollup documents are stored in `UTC`. */ time_zone?: TimeZone } export interface RollupFieldMetric { + /** The field to collect metrics for. This must be a numeric of some kind. */ field: Field + /** An array of metrics to collect for the field. At least one metric must be configured. */ metrics: RollupMetric[] } export interface RollupGroupings { + /** A date histogram group aggregates a date field into time-based buckets. + * This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ date_histogram?: RollupDateHistogramGrouping + /** The histogram group aggregates one or more numeric fields into numeric histogram intervals. */ histogram?: RollupHistogramGrouping + /** The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. + * The indexer enumerates and stores all values of a field for each time-period. + * This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ terms?: RollupTermsGrouping } export interface RollupHistogramGrouping { + /** The set of fields that you wish to build histograms for. + * All fields specified must be some kind of numeric. + * Order does not matter. */ fields: Fields + /** The interval of histogram buckets to be generated when rolling up. + * For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). + * Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ interval: long } export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' export interface RollupTermsGrouping { + /** The set of fields that you wish to collect terms for. + * This array can contain fields that are both keyword and numerics. + * Order does not matter. */ fields: Fields } export interface RollupDeleteJobRequest extends RequestBase { + /** Identifier for the job. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface RollupDeleteJobResponse { @@ -18791,7 +31740,13 @@ export interface RollupDeleteJobResponse { export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' export interface RollupGetJobsRequest extends RequestBase { + /** Identifier for the rollup job. + * If it is `_all` or omitted, the API returns all rollup jobs. */ id?: Id + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface RollupGetJobsResponse { @@ -18799,8 +31754,13 @@ export interface RollupGetJobsResponse { } export interface RollupGetJobsRollupJob { + /** The rollup job configuration. */ config: RollupGetJobsRollupJobConfiguration + /** Transient statistics about the rollup job, such as how many documents have been processed and how many rollup summary docs have been indexed. + * These stats are not persisted. + * If a node is restarted, these stats are reset. */ stats: RollupGetJobsRollupJobStats + /** The current status of the indexer for the rollup job. */ status: RollupGetJobsRollupJobStatus } @@ -18837,12 +31797,19 @@ export interface RollupGetJobsRollupJobStatus { } export interface RollupGetRollupCapsRequest extends RequestBase { + /** Index, indices or index-pattern to return rollup capabilities for. + * `_all` may be used to fetch rollup capabilities from all jobs. */ id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type RollupGetRollupCapsResponse = Record export interface RollupGetRollupCapsRollupCapabilities { + /** There can be multiple, independent jobs configured for a single index or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various configurations available. */ rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] } @@ -18864,7 +31831,13 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface RollupGetRollupIndexCapsRequest extends RequestBase { + /** Data stream or index to check for rollup capabilities. + * Wildcard (`*`) expressions are supported. */ index: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } } export type RollupGetRollupIndexCapsResponse = Record @@ -18883,28 +31856,74 @@ export interface RollupGetRollupIndexCapsRollupJobSummaryField { } export interface RollupPutJobRequest extends RequestBase { + /** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the + * data that is associated with the rollup job. The ID is persistent; it is stored with the rolled + * up data. If you create a job, let it run for a while, then delete the job, the data that the job + * rolled up is still associated with this job ID. You cannot create a new job with the same ID + * since that could lead to problems with mismatched job configurations. */ id: Id + /** A cron string which defines the intervals when the rollup job should be executed. When the interval + * triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated + * to the time interval of the data being rolled up. For example, you may wish to create hourly rollups + * of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The + * cron pattern is defined just like a Watcher cron schedule.
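+ *
+ * A hedged configuration sketch tying these fields together (hypothetical index names and
+ * schedule; assumes a connected `client` with the rollup feature available):
+ *
+ *   await client.rollup.putJob({
+ *     id: 'sensor-rollup',
+ *     index_pattern: 'sensor-*',
+ *     rollup_index: 'sensor_rollup',
+ *     cron: '0 0 0 * * ?', // run the indexer daily at midnight
+ *     page_size: 1000,
+ *     groups: {
+ *       date_histogram: { field: 'timestamp', fixed_interval: '1h' }
+ *     }
+ *   })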
*/ cron: string + /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be + * available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of + * the groups configuration as defining a set of tools that can later be used in aggregations to partition the + * data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide + * enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ groups: RollupGroupings + /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to + * rollup the entire index or index-pattern. */ index_pattern: string + /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each + * group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined + * on a per-field basis and for each field you configure which metric should be collected. */ metrics?: RollupFieldMetric[] + /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends + * to execute faster, but requires more memory during processing. This value has no effect on how the data is + * rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ page_size: integer + /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ rollup_index: IndexName + /** Time to wait for the request to complete. */ timeout?: Duration headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } } export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupSearchRequest extends RequestBase { + /** A comma-separated list of data streams and indices used to limit the request. + * This parameter has the following rules: + * + * * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. + * * Multiple non-rollup indices may be specified. + * * Only one rollup index may be specified. If more than one is supplied, an exception occurs. + * * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ index: Indices + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean + /** Specifies aggregations.
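The flattened `RollupPutJobRequest` above maps directly onto a client call. A sketch with hypothetical job and index names, assuming a local cluster:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

// Hypothetical job: roll up hourly sensor data nightly into 'sensor_rollup'.
await client.rollup.putJob({
  id: 'sensor-hourly',             // persistent job identifier
  index_pattern: 'sensor-*',       // source indices; wildcards supported
  rollup_index: 'sensor_rollup',   // destination index for rollup summary docs
  cron: '0 0 0 * * ?',             // run the indexer nightly at midnight
  page_size: 1000,                 // indexer batch size (speed vs. memory)
  groups: {
    // A date_histogram group is mandatory.
    date_histogram: { field: 'timestamp', fixed_interval: '1h', delay: '7d' },
    terms: { fields: ['node'] },
    histogram: { fields: ['voltage'], interval: 5 }
  },
  metrics: [
    { field: 'temperature', metrics: ['min', 'max', 'sum'] },
    { field: 'voltage', metrics: ['avg'] }
  ]
})
```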
*/ aggregations?: Record - /** @alias aggregations */ + /** Specifies aggregations. + * @alias aggregations */ aggs?: Record + /** Specifies a DSL query that is subject to some limitations. */ query?: QueryDslQueryContainer + /** Must be zero if set, as rollups work on pre-aggregated data. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } } export interface RollupRollupSearchResponse> { @@ -18917,7 +31936,12 @@ export interface RollupRollupSearchResponse } export interface SearchApplicationSearchApplicationParameters { + /** Indices that are part of the Search Application. */ indices: IndexName[] + /** Analytics collection associated with the Search Application. */ analytics_collection_name?: Name + /** Search template to use on search operations. */ template?: SearchApplicationSearchApplicationTemplate } export interface SearchApplicationSearchApplicationTemplate { + /** The associated mustache template. */ script: Script | string } export interface SearchApplicationDeleteRequest extends RequestBase { + /** The name of the search application to delete. */ name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationDeleteResponse = AcknowledgedResponseBase export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { + /** The name of the analytics collection to be deleted */ name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase export interface SearchApplicationGetRequest extends RequestBase { + /** The name of the search application */ name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationGetResponse = SearchApplicationSearchApplication export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { + /** A list of analytics collections to limit the returned information */ name?: Name[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationGetBehavioralAnalyticsResponse = Record export interface SearchApplicationListRequest extends RequestBase { + /** Query in the Lucene query string syntax. */ q?: string + /** Starting offset.
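A follow-up sketch querying the hypothetical rollup index created above; `size` is set to `0` because rollup search operates on pre-aggregated data:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

const result = await client.rollup.rollupSearch({
  index: 'sensor_rollup', // hypothetical rollup index from the putJob sketch
  size: 0,                // must be zero: rollups have no raw hits to return
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
console.log(result.aggregations)
```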
*/ from?: integer + /** Specifies the maximum number of results to get. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { q?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { q?: never, from?: never, size?: never } } export interface SearchApplicationListResponse { @@ -18995,10 +32064,17 @@ export interface SearchApplicationListResponse { } export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { + /** The name of the behavioral analytics collection. */ collection_name: Name + /** The analytics event type. */ event_type: SearchApplicationEventType + /** Whether the response should include more details */ debug?: boolean payload?: any + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } } export interface SearchApplicationPostBehavioralAnalyticsEventResponse { @@ -19007,9 +32083,15 @@ export interface SearchApplicationPostBehavioralAnalyticsEventResponse { } export interface SearchApplicationPutRequest extends RequestBase { + /** The name of the search application to be created or updated. */ name: Name + /** If `true`, this request cannot replace or update existing Search Applications. */ create?: boolean search_application?: SearchApplicationSearchApplicationParameters + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, search_application?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, search_application?: never } } export interface SearchApplicationPutResponse { @@ -19017,27 +32099,45 @@ export interface SearchApplicationPutResponse { } export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + /** The name of the analytics collection created or updated */ name: Name } export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { + /** The name of the analytics collection to be created or updated. */ name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase export interface SearchApplicationRenderQueryRequest extends RequestBase { + /** The name of the search application to render the query for. */ name: Name params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, params?: never } + /** All values in `querystring` will be added to the request querystring.
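The search-application types above pair a stored template with a list of indices, so callers only ever supply template parameters. A sketch of creating and querying one (application, index, and parameter names are hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

await client.searchApplication.put({
  name: 'website-search',
  search_application: {
    indices: ['website-products', 'website-docs'],
    template: {
      script: {
        lang: 'mustache',
        // The query body lives server-side as a mustache template.
        source: JSON.stringify({
          query: { multi_match: { query: '{{query_string}}', fields: ['title', 'body'] } }
        }),
        params: { query_string: '*' } // defaults when the caller sends no params
      }
    }
  }
})

// Callers pass template params only; the DSL stays in the application.
const res = await client.searchApplication.search({
  name: 'website-search',
  params: { query_string: 'dashboards' }
})
console.log(res.hits.hits)
```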
*/ + querystring?: { [key: string]: any } & { name?: never, params?: never } } export interface SearchApplicationRenderQueryResponse { } export interface SearchApplicationSearchRequest extends RequestBase { + /** The name of the search application to be searched. */ name: Name + /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean + /** Query parameters specific to this request, which will override any defaults specified in the template. */ params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } } export type SearchApplicationSearchResponse> = SearchResponseBody @@ -19049,8 +32149,13 @@ export interface SearchableSnapshotsCacheStatsNode { } export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { + /** The names of the nodes in the cluster to target. */ node_id?: NodeIds master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } } export interface SearchableSnapshotsCacheStatsResponse { @@ -19069,10 +32174,19 @@ export interface SearchableSnapshotsCacheStatsShared { } export interface SearchableSnapshotsClearCacheRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to clear from the cache. + * It supports wildcards (`*`). */ index?: Indices + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } } export type SearchableSnapshotsClearCacheResponse = any @@ -19084,15 +32198,31 @@ export interface SearchableSnapshotsMountMountedSnapshot { } export interface SearchableSnapshotsMountRequest extends RequestBase { + /** The name of the repository containing the snapshot of the index to mount. */ repository: Name + /** The name of the snapshot of the index to mount. */ snapshot: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** If true, the request blocks until the operation is complete. */ wait_for_completion?: boolean + /** The mount option for the searchable snapshot index. 
*/ storage?: string + /** The name of the index contained in the snapshot whose data is to be mounted. + * If no `renamed_index` is specified, this name will also be used to create the new index. */ index: IndexName + /** The name of the index that will be created. */ renamed_index?: IndexName + /** The settings that should be added to the index when it is mounted. */ index_settings?: Record + /** The names of settings that should be removed from the index when it is mounted. */ ignore_index_settings?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } } export interface SearchableSnapshotsMountResponse { @@ -19100,8 +32230,14 @@ export interface SearchableSnapshotsMountResponse { } export interface SearchableSnapshotsStatsRequest extends RequestBase { + /** A comma-separated list of data streams and indices to retrieve statistics for. */ index?: Indices + /** Return stats aggregated at cluster, index or shard level */ level?: SearchableSnapshotsStatsLevel + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, level?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, level?: never } } export interface SearchableSnapshotsStatsResponse { @@ -19110,26 +32246,51 @@ export interface SearchableSnapshotsStatsResponse { } export interface SecurityAccess { + /** A list of indices permission entries for cross-cluster replication. */ replication?: SecurityReplicationAccess[] + /** A list of indices permission entries for cross-cluster search. */ search?: SecuritySearchAccess[] } export interface SecurityApiKey { + /** Id for the API key */ id: Id + /** Name of the API key. */ name: Name + /** The type of the API key (e.g. `rest` or `cross_cluster`). */ type: SecurityApiKeyType + /** Creation time for the API key in milliseconds. */ creation: EpochTime + /** Expiration time for the API key in milliseconds. */ expiration?: EpochTime + /** Invalidation status for the API key. + * If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ invalidated: boolean + /** If the key has been invalidated, invalidation time in milliseconds. */ invalidation?: EpochTime + /** Principal for which this API key was created */ username: Username + /** Realm name of the principal for which this API key was created. */ realm: string + /** Realm type of the principal for which this API key was created */ realm_type?: string + /** Metadata of the API key */ metadata: Metadata + /** The role descriptors assigned to this API key when it was created or last updated. + * An empty role descriptor means the API key inherits the owner user’s permissions. */ role_descriptors?: Record + /** The owner user’s permissions associated with the API key. + * It is a point-in-time snapshot captured at creation and subsequent updates. 
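The mount request above translates to a call like the following; the repository, snapshot, and index names are hypothetical and must already exist:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

await client.searchableSnapshots.mount({
  repository: 'my_repository',
  snapshot: 'snapshot_1',
  index: 'my-index',                 // index inside the snapshot
  renamed_index: 'my-index-mounted', // name to mount it under
  index_settings: { 'index.number_of_replicas': 0 },
  wait_for_completion: true          // block until the mount finishes
})
```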
+ * An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. */ limited_by?: Record[] + /** The access granted to cross-cluster API keys. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access?: SecurityAccess + /** The profile uid for the API key owner principal, if requested and if it exists */ profile_uid?: string + /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ _sort?: SortResults } @@ -19140,13 +32301,18 @@ export interface SecurityApplicationGlobalUserPrivileges { } export interface SecurityApplicationPrivileges { + /** The name of the application to which this entry applies. */ application: string + /** A list of strings, where each element is the name of an application privilege or action. */ privileges: string[] + /** A list of resources to which the privileges are applied. */ resources: string[] } export interface SecurityBulkError { + /** The number of errors */ count: integer + /** Details about the errors, keyed by role name */ details: Record } @@ -19180,10 +32346,16 @@ export type SecurityGrantType = 'password' | 'access_token' export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -19201,65 +32373,117 @@ export interface SecurityRealmInfo { export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' export interface SecurityRemoteClusterPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names + /** The cluster level privileges that owners of the role have on the remote cluster. */ privileges: SecurityRemoteClusterPrivilege[] } export interface SecurityRemoteIndicesPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply.
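The indices-permission entry shape documented above can be written as a typed constant against the client's exported types; a minimal sketch with hypothetical field and index names:

```ts
import type { SecurityIndicesPrivileges } from '@elastic/elasticsearch/lib/api/types'

// Read access to 'logs-*', limited to two fields (field-level security)
// and to documents matching a query (document-level security).
const entry: SecurityIndicesPrivileges = {
  names: ['logs-*'],
  privileges: ['read', 'view_index_metadata'],
  field_security: { grant: ['@timestamp', 'message'] },
  query: { term: { 'event.dataset': 'nginx.access' } }
}
```

An entry like this plugs into the `indices` list of a role descriptor, as in the bulk role sketch further below.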
*/ clusters: Names + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } export interface SecurityRemoteUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean clusters: string[] } export interface SecurityReplicationAccess { + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** This needs to be set to true if the patterns in the names field should cover system indices. */ allow_restricted_indices?: boolean } export interface SecurityRestriction { + /** A list of workflows to which the API key is restricted. + * NOTE: In order to use a role restriction, an API key must be created with a single role descriptor. */ workflows: SecurityRestrictionWorkflow[] } export type SecurityRestrictionWorkflow = 'search_application_query' | string export interface SecurityRoleDescriptor { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster?: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ index?: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited to a subset of the cluster permissions.
+ * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of users that the API keys can impersonate. + * NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + * For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ run_as?: string[] + /** Optional description of the role descriptor */ description?: string + /** Restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction transient_metadata?: Record } export interface SecurityRoleDescriptorRead { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ indices: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ index: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited to a subset of the cluster permissions. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of users that the API keys can impersonate. */ run_as?: string[] + /** An optional description of the role descriptor. */ description?: string + /** A restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction transient_metadata?: Record } @@ -19287,21 +32511,34 @@ export interface SecurityRoleTemplate { export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer export interface SecurityRoleTemplateQuery { + /** When you create a role, you can specify a query that defines the document level security permissions. You can optionally + * use Mustache templates in the role query to insert the username of the current authenticated user into the role. + * Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based + * templates and define custom parameters.
You access the details for the current authenticated user through the _user parameter. */ template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery } export interface SecurityRoleTemplateScript { source?: SecurityRoleTemplateInlineQuery + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } export interface SecuritySearchAccess { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -19322,10 +32559,15 @@ export interface SecurityUser { } export interface SecurityUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean } @@ -19359,10 +32601,25 @@ export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { } export interface SecurityActivateUserProfileRequest extends RequestBase { + /** The user's Elasticsearch access token or JWT. + * Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ access_token?: string + /** The type of grant. */ grant_type: SecurityGrantType + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: string + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. 
+ * It is not valid with other grant types. */ username?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } } export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata @@ -19373,6 +32630,10 @@ export interface SecurityAuthenticateAuthenticateApiKey { } export interface SecurityAuthenticateRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityAuthenticateResponse { @@ -19395,33 +32656,70 @@ export interface SecurityAuthenticateToken { } export interface SecurityBulkDeleteRoleRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** An array of role names to delete */ names: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, names?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, names?: never } } export interface SecurityBulkDeleteRoleResponse { + /** Array of deleted roles */ deleted?: string[] + /** Array of roles that could not be found */ not_found?: string[] + /** Present if any deletes resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkPutRoleRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** A dictionary of role name to RoleDescriptor objects to add or update */ roles: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, roles?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, roles?: never } } export interface SecurityBulkPutRoleResponse { + /** Array of created roles */ created?: string[] + /** Array of updated roles */ updated?: string[] + /** Array of role names without any changes */ noop?: string[] + /** Present if any updates resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { + /** Expiration time for the API keys. + * By default, API keys never expire. + * This property can be omitted to leave the value unchanged. */ expiration?: Duration + /** The API key identifiers. */ ids: string | string[] + /** Arbitrary nested metadata to associate with the API keys. + * Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. + * Any information specified with this parameter fully replaces metadata previously associated with the API key. 
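The bulk role endpoints above accept a map of role names to descriptors. A sketch combining a plain role with a templated document-level-security query as described earlier (role and index names hypothetical; `{{_user.username}}` resolves per authenticated user):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

const res = await client.security.bulkPutRole({
  roles: {
    'logs-reader': {
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    },
    'own-docs-only': {
      indices: [{
        names: ['messages'],
        privileges: ['read'],
        // Templated DLS: each user only sees documents they own.
        query: { template: { source: { term: { owner: '{{_user.username}}' } } } }
      }]
    }
  }
})
console.log(res.created, res.updated, res.errors)
```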
*/ metadata?: Metadata + /** The role descriptors to assign to the API keys. + * An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter. + * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } } export interface SecurityBulkUpdateApiKeysResponse { @@ -19431,17 +32729,36 @@ export interface SecurityBulkUpdateApiKeysResponse { } export interface SecurityChangePasswordRequest extends RequestBase { + /** The user whose password you want to change. If you do not specify this + * parameter, the password is changed for the current user. */ username?: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** The new password value. Passwords must be at least 6 characters long. */ password?: Password + /** A hash of the new password value. This must be produced using the same + * hashing algorithm as has been configured for password storage. For more details, + * see the explanation of the `xpack.security.authc.password_hashing.algorithm` + * setting. */ password_hash?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } } export interface SecurityChangePasswordResponse { } export interface SecurityClearApiKeyCacheRequest extends RequestBase { + /** Comma-separated list of API key IDs to evict from the API key cache. + * To evict all API keys, use `*`. + * Does not support other wildcard patterns. */ ids: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ids?: never } } export interface SecurityClearApiKeyCacheResponse { @@ -19451,7 +32768,14 @@ export interface SecurityClearApiKeyCacheResponse { } export interface SecurityClearCachedPrivilegesRequest extends RequestBase { + /** A comma-separated list of applications. + * To clear all applications, use an asterisk (`*`). + * It does not support other wildcard patterns. */ application: Name + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { application?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never } } export interface SecurityClearCachedPrivilegesResponse { @@ -19461,8 +32785,17 @@ export interface SecurityClearCachedPrivilegesResponse { } export interface SecurityClearCachedRealmsRequest extends RequestBase { + /** A comma-separated list of realms. + * To clear all realms, use an asterisk (`*`). + * It does not support other wildcard patterns. */ realms: Names + /** A comma-separated list of the users to clear from the cache. + * If you do not specify this parameter, the API evicts all users from the user cache. */ usernames?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realms?: never, usernames?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { realms?: never, usernames?: never } } export interface SecurityClearCachedRealmsResponse { @@ -19472,7 +32805,14 @@ export interface SecurityClearCachedRealmsResponse { } export interface SecurityClearCachedRolesRequest extends RequestBase { + /** A comma-separated list of roles to evict from the role cache. + * To evict all roles, use an asterisk (`*`). + * It does not support other wildcard patterns. */ name: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export interface SecurityClearCachedRolesResponse { @@ -19482,9 +32822,18 @@ export interface SecurityClearCachedRolesResponse { } export interface SecurityClearCachedServiceTokensRequest extends RequestBase { + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace + /** The name of the service, which must be unique within its namespace. */ service: Service + /** A comma-separated list of token names to evict from the service account token caches. + * Use a wildcard (`*`) to evict all tokens that belong to a service account. + * It does not support other wildcard patterns. */ name: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never } } export interface SecurityClearCachedServiceTokensResponse { @@ -19494,41 +32843,104 @@ export interface SecurityClearCachedServiceTokensResponse { } export interface SecurityCreateApiKeyRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** The expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration + /** A name for the API key. */ name?: Name + /** An array of role descriptors for this API key. + * When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. 
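A sketch of creating a restricted API key per the request and response shapes above (the name, descriptor, and metadata are hypothetical); note how `encoded` slots straight into an `Authorization` header:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

// Effective permissions are the intersection of this descriptor
// and the creating user's own permissions.
const key = await client.security.createApiKey({
  name: 'ingest-only-key',
  expiration: '30d', // omit for a non-expiring key
  role_descriptors: {
    'ingest-logs': {
      indices: [{ names: ['logs-*'], privileges: ['create_doc', 'auto_configure'] }]
    }
  },
  metadata: { application: 'shipper', environment: 'dev' }
})

// `encoded` is base64('id:api_key'), ready for use as a credential.
console.log(`Authorization: ApiKey ${key.encoded}`)
```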
+ * If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions thereby limiting the access scope for API keys. + * The structure of a role descriptor is the same as the request for the create role API. + * For more details, refer to the create or update roles API. + * + * NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. + * In this case, you must explicitly specify a role descriptor with no privileges. + * The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ role_descriptors?: Record + /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never } } export interface SecurityCreateApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: long + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key. */ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { + /** The access to be granted to this API key. + * The access is composed of permissions for cross-cluster search and cross-cluster replication. + * At least one of them must be specified. + * + * NOTE: No explicit privileges should be specified for either search or replication access. + * The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ access: SecurityAccess + /** Expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata + /** Specifies the name for this API key. */ name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never } } export interface SecurityCreateCrossClusterApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: DurationValue + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key.
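For cross-cluster keys, access is declared per use case rather than spelled out as privileges, since the server derives the role descriptor itself. A sketch (key name and index patterns hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

const ccKey = await client.security.createCrossClusterApiKey({
  name: 'remote-cluster-key',
  access: {
    // No explicit privileges here: the server assigns them.
    search: [{ names: ['logs-*'] }],
    replication: [{ names: ['archive-*'] }]
  },
  metadata: { purpose: 'cluster-to-cluster' }
})
console.log(ccKey.id, ccKey.encoded)
```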
*/ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateServiceTokenRequest extends RequestBase { + /** The name of the namespace, which is a top-level grouping of service accounts. */ namespace: Namespace + /** The name of the service. */ service: Service + /** The name for the service account token. + * If omitted, a random name will be generated. + * + * Token names must be at least one and no more than 256 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + * + * NOTE: Token names must be unique in the context of the associated service account. + * They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ name?: Name + /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } } export interface SecurityCreateServiceTokenResponse { @@ -19562,12 +32974,24 @@ export interface SecurityDelegatePkiAuthenticationRealm { } export interface SecurityDelegatePkiRequest extends RequestBase { + /** The X509Certificate chain, which is represented as an ordered string array. + * Each string in the array is a base64-encoded string (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding. + * + * The first element is the target certificate that contains the subject distinguished name that is requesting access. + * This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ x509_certificate_chain: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { x509_certificate_chain?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { x509_certificate_chain?: never } } export interface SecurityDelegatePkiResponse { + /** An access token associated with the subject distinguished name of the client's certificate. */ access_token: string + /** The amount of time (in seconds) before the token expires. */ expires_in: long + /** The type of token. */ type: string authentication?: SecurityDelegatePkiAuthentication } @@ -19577,115 +33001,227 @@ export interface SecurityDeletePrivilegesFoundStatus { } export interface SecurityDeletePrivilegesRequest extends RequestBase { + /** The name of the application. + * Application privileges are always associated with exactly one application. */ application: Name + /** The name of the privilege. */ name: Names + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
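A sketch of minting a service token per the request above, using the built-in `elastic/fleet-server` service account; the token name is hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // assumption: local dev cluster

const token = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-token' // omit to have a random name generated
})
// The value is a bearer credential for that service account.
console.log(token.token.name, token.token.value)
```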
*/ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never, name?: never, refresh?: never } } export type SecurityDeletePrivilegesResponse = Record> export interface SecurityDeleteRoleRequest extends RequestBase { + /** The name of the role. */ name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never } } export interface SecurityDeleteRoleResponse { + /** If the role is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never } } export interface SecurityDeleteRoleMappingResponse { + /** If the mapping is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteServiceTokenRequest extends RequestBase { + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace + /** The service name. */ service: Service + /** The name of the service account token. */ name: Name + /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } } export interface SecurityDeleteServiceTokenResponse { + /** If the service account token is successfully deleted, the request returns `{"found": true}`. + * Otherwise, the response will have status code 404 and `found` is set to `false`. */ found: boolean } export interface SecurityDeleteUserRequest extends RequestBase { + /** An identifier for the user. 
*/ username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } } export interface SecurityDeleteUserResponse { + /** If the user is successfully deleted, the request returns `{"found": true}`. + * Otherwise, `found` is set to `false`. */ found: boolean } export interface SecurityDisableUserRequest extends RequestBase { + /** An identifier for the user. */ username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } } export interface SecurityDisableUserResponse { } export interface SecurityDisableUserProfileRequest extends RequestBase { + /** Unique identifier for the user profile. */ uid: SecurityUserProfileId + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, refresh?: never } } export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase export interface SecurityEnableUserRequest extends RequestBase { + /** An identifier for the user. */ username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } } export interface SecurityEnableUserResponse { } export interface SecurityEnableUserProfileRequest extends RequestBase { + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { uid?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, refresh?: never } } export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase export interface SecurityEnrollKibanaRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityEnrollKibanaResponse { token: SecurityEnrollKibanaToken + /** The CA certificate used to sign the node certificates that Elasticsearch uses for TLS on the HTTP layer. + * The certificate is returned as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca: string } export interface SecurityEnrollKibanaToken { + /** The name of the bearer token for the `elastic/kibana` service account. */ name: string + /** The value of the bearer token for the `elastic/kibana` service account. + * Use this value to authenticate the service account with Elasticsearch. */ value: string } export interface SecurityEnrollNodeRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityEnrollNodeResponse { + /** The CA private key that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ http_ca_key: string + /** The CA certificate that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca_cert: string + /** The CA certificate that is used to sign the TLS certificate for the transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_ca_cert: string + /** The private key that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ transport_key: string + /** The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_cert: string + /** A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. */ nodes_addresses: string[] } export interface SecurityGetApiKeyRequest extends RequestBase { + /** An API key id. + * This parameter cannot be used with any of `name`, `realm_name` or `username`. */ id?: Id + /** An API key name. + * This parameter cannot be used with any of `id`, `realm_name` or `username`. + * It supports prefix search with wildcard. */ name?: Name + /** A boolean flag that can be used to query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ owner?: boolean + /** The name of an authentication realm. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ realm_name?: Name + /** The username of a user. 
+ * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ username?: Username + /** Return the snapshot of the owner user's role descriptors + * associated with the API key. An API key's actual + * permission is the intersection of its assigned role + * descriptors and the owner user's role descriptors. */ with_limited_by?: boolean + /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */ active_only?: boolean + /** Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. */ with_profile_uid?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never } } export interface SecurityGetApiKeyResponse { @@ -19693,23 +33229,47 @@ export interface SecurityGetApiKeyResponse { } export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityGetBuiltinPrivilegesResponse { + /** The list of cluster privileges that are understood by this version of Elasticsearch. */ cluster: SecurityClusterPrivilege[] + /** The list of index privileges that are understood by this version of Elasticsearch. */ index: IndexName[] + /** The list of remote_cluster privileges that are understood by this version of Elasticsearch. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster: SecurityRemoteClusterPrivilege[] } export interface SecurityGetPrivilegesRequest extends RequestBase { + /** The name of the application. + * Application privileges are always associated with exactly one application. + * If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name + /** The name of the privilege. + * If you do not specify this parameter, the API returns information about all privileges for the requested application. */ name?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never, name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never, name?: never } } export type SecurityGetPrivilegesResponse = Record> export interface SecurityGetRoleRequest extends RequestBase { + /** The name of the role. + * You can specify multiple roles as a comma-separated list. + * If you do not specify this parameter, the API returns information about all roles. */ name?: Names + /** All values in `body` will be added to the request body. 
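+ * @example
+ * // Illustrative sketch; the role name is hypothetical.
+ * const roles = await client.security.getRole({ name: 'logs_reader' })
+ * console.log(roles['logs_reader']?.indices)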
*/ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SecurityGetRoleResponse = Record<string, SecurityGetRoleRole> @@ -19717,7 +33277,9 @@ export type SecurityGetRoleResponse = Record<string, SecurityGetRoleRole> export interface SecurityGetRoleRole { cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata description?: string @@ -19729,14 +33291,28 @@ export interface SecurityGetRoleRole { } export interface SecurityGetRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ name?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } } export type SecurityGetRoleMappingResponse = Record<string, SecurityRoleMapping> export interface SecurityGetServiceAccountsRequest extends RequestBase { + /** The name of the namespace. + * Omit this parameter to retrieve information about all service accounts. + * If you omit this parameter, you must also omit the `service` parameter. */ namespace?: Namespace + /** The service name. + * Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ service?: Service + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never } } export type SecurityGetServiceAccountsResponse = Record<string, SecurityGetServiceAccountsRoleDescriptorWrapper> @@ -19746,7 +33322,9 @@ export interface SecurityGetServiceAccountsRoleDescriptorWrapper { } export interface SecurityGetServiceCredentialsNodesCredentials { + /** General status showing how nodes respond to the above collection request. */ _nodes: NodeStatistics + /** File-backed tokens collected from all nodes. */ file_tokens: Record<string, SecurityGetServiceCredentialsNodesCredentialsFileToken> } @@ -19755,24 +33333,40 @@ export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { } export interface SecurityGetServiceCredentialsRequest extends RequestBase { + /** The name of the namespace. */ namespace: Namespace + /** The service name. */ service: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never } } export interface SecurityGetServiceCredentialsResponse { service_account: string count: integer tokens: Record<string, Metadata> + /** Service account credentials collected from all nodes of the cluster.
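+ * @example
+ * // Illustrative sketch using the built-in `elastic/fleet-server` service account.
+ * const creds = await client.security.getServiceCredentials({ namespace: 'elastic', service: 'fleet-server' })
+ * console.log(creds.count, Object.keys(creds.tokens))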
*/ nodes_credentials: SecurityGetServiceCredentialsNodesCredentials } export interface SecurityGetSettingsRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface SecurityGetSettingsResponse { + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ 'security-profile': SecuritySecuritySettings + /** Settings for the index used to store tokens. */ 'security-tokens': SecuritySecuritySettings } @@ -19791,12 +33385,32 @@ export interface SecurityGetTokenAuthenticationProvider { } export interface SecurityGetTokenRequest extends RequestBase { + /** The type of grant. + * Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ grant_type?: SecurityGetTokenAccessTokenGrantType + /** The scope of the token. + * Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ scope?: string + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ password?: Password + /** The base64 encoded kerberos ticket. + * If you specify the `_kerberos` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ kerberos_ticket?: string + /** The string that was returned when you created the token, which enables you to extend its life. + * If you specify the `refresh_token` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ refresh_token?: string + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } } export interface SecurityGetTokenResponse { @@ -19815,13 +33429,23 @@ export interface SecurityGetTokenUserRealm { } export interface SecurityGetUserRequest extends RequestBase { + /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ username?: Username | Username[] + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean + /** All values in `body` will be added to the request body. 
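+ * @example
+ * // Illustrative sketch with a hypothetical username.
+ * const users = await client.security.getUser({ username: ['jdoe'], with_profile_uid: true })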
*/ + body?: string | { [key: string]: any } & { username?: never, with_profile_uid?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, with_profile_uid?: never } } export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export interface SecurityGetUserPrivilegesResponse { @@ -19840,11 +33464,23 @@ export interface SecurityGetUserProfileGetUserProfileErrors { } export interface SecurityGetUserProfileRequest extends RequestBase { + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId | SecurityUserProfileId[] + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content use `data=` to retrieve content nested under the specified ``. + * By default returns no `data` content. */ data?: string | string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, data?: never } } export interface SecurityGetUserProfileResponse { + /** A successful call returns the JSON representation of the user profile and its internal versioning numbers. + * The API returns an empty object if no profile document is found for the provided `uid`. + * The content of the data field is not returned by default to avoid deserializing a potential large payload. */ profiles: SecurityUserProfileWithMetadata[] errors?: SecurityGetUserProfileGetUserProfileErrors } @@ -19853,19 +33489,46 @@ export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { name: Name + /** Expiration time for the API key. By default, API keys never expire. */ expiration?: DurationLarge + /** The role descriptors for this API key. + * When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. + * If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access token. */ role_descriptors?: Record | Record[] + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the `metadata` object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata } export interface SecurityGrantApiKeyRequest extends RequestBase { + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh + /** The API key. */ api_key: SecurityGrantApiKeyGrantApiKey + /** The type of grant. Supported grant types are: `access_token`, `password`. */ grant_type: SecurityGrantApiKeyApiKeyGrantType + /** The user's access token. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. 
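+ * @example
+ * // Illustrative password-grant sketch; all credential values are hypothetical.
+ * const resp = await client.security.grantApiKey({
+ *   grant_type: 'password',
+ *   username: 'jdoe',
+ *   password: 'a-long-password',
+ *   api_key: { name: 'granted-key' }
+ * })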
*/ access_token?: string + /** The user name that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ username?: Username + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: Password + /** The name of the user to be impersonated. */ run_as?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } } export interface SecurityGrantApiKeyResponse { @@ -19877,26 +33540,41 @@ export interface SecurityGrantApiKeyResponse { } export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + /** The name of the application. */ application: string + /** A list of the privileges that you want to check for the specified resources. + * It may be either application privilege names or the names of actions that are granted by those privileges */ privileges: string[] + /** A list of resource names against which the privileges should be checked. */ resources: string[] } export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesIndexPrivilegesCheck { + /** A list of indices. */ names: Indices + /** A list of the privileges that you want to check for the specified indices. */ privileges: SecurityIndexPrivilege[] + /** This needs to be set to `true` (default is `false`) if using wildcards or regexps for patterns that cover restricted indices. + * Implicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false. + * If restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of `allow_restricted_indices`. */ allow_restricted_indices?: boolean } export type SecurityHasPrivilegesPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { + /** Username */ user?: Name application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. */ cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never } } export type SecurityHasPrivilegesResourcePrivileges = Record @@ -19916,84 +33594,175 @@ export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. 
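+ * @example
+ * // Illustrative sketch; the profile UIDs are hypothetical.
+ * const resp = await client.security.hasPrivilegesUserProfile({
+ *   uids: ['u_profile_one', 'u_profile_two'],
+ *   privileges: { cluster: ['monitor'] }
+ * })
+ * console.log(resp.has_privilege_uids)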
*/ cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] } export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { + /** A list of profile IDs. The privileges are checked for associated users of the profiles. */ uids: SecurityUserProfileId[] + /** An object containing all the privileges to be checked. */ privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uids?: never, privileges?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uids?: never, privileges?: never } } export interface SecurityHasPrivilegesUserProfileResponse { + /** The subset of the requested profile IDs of the users that + * have all the requested privileges. */ has_privilege_uids: SecurityUserProfileId[] + /** The subset of the requested profile IDs for which an error + * was encountered. It does not include the missing profile IDs + * or the profile IDs of the users that do not have all the + * requested privileges. This field is absent if empty. */ errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors } export interface SecurityInvalidateApiKeyRequest extends RequestBase { id?: Id + /** A list of API key ids. + * This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ ids?: Id[] + /** An API key name. + * This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ name?: Name + /** Query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. + * + * NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ owner?: boolean + /** The name of an authentication realm. + * This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ realm_name?: string + /** The username of a user. + * This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } } export interface SecurityInvalidateApiKeyResponse { + /** The number of errors that were encountered when invalidating the API keys. */ error_count: integer + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The IDs of the API keys that were invalidated as part of this request. */ invalidated_api_keys: string[] + /** The IDs of the API keys that were already invalidated. */ previously_invalidated_api_keys: string[] } export interface SecurityInvalidateTokenRequest extends RequestBase { + /** An access token. + * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ token?: string + /** A refresh token. + * This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
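+ * @example
+ * // Illustrative sketch; the refresh token value is hypothetical.
+ * const resp = await client.security.invalidateToken({ refresh_token: 'dGhpcy1pcy1ub3QtcmVhbA==' })
+ * console.log(resp.invalidated_tokens)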
*/ refresh_token?: string + /** The name of an authentication realm. + * This parameter cannot be used with either `refresh_token` or `token`. */ realm_name?: Name + /** The username of a user. + * This parameter cannot be used with either `refresh_token` or `token`. */ username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } } export interface SecurityInvalidateTokenResponse { + /** The number of errors that were encountered when invalidating the tokens. */ error_count: long + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The number of the tokens that were invalidated as part of this request. */ invalidated_tokens: long + /** The number of tokens that were already invalidated. */ previously_invalidated_tokens: long } export interface SecurityOidcAuthenticateRequest extends RequestBase { + /** Associate a client session with an ID token and mitigate replay attacks. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ nonce: string + /** The name of the OpenID Connect realm. + * This property is useful in cases where multiple realms are defined. */ realm?: string + /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. + * This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ redirect_uri: string + /** Maintain state between the authentication request and the response. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ state: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } } export interface SecurityOidcAuthenticateResponse { + /** The Elasticsearch access token. */ access_token: string + /** The duration (in seconds) of the tokens. */ expires_in: integer + /** The Elasticsearch refresh token. */ refresh_token: string + /** The type of token. */ type: string } export interface SecurityOidcLogoutRequest extends RequestBase { + /** The access token to be invalidated. */ token: string + /** The refresh token to be invalidated. */ refresh_token?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } + /** All values in `querystring` will be added to the request querystring. 
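+ * @example
+ * // Illustrative sketch; both token values are hypothetical placeholders.
+ * const resp = await client.security.oidcLogout({ token: '<access-token>', refresh_token: '<refresh-token>' })
+ * // Redirect the user's browser to `resp.redirect` to end the session at the OpenID Connect Provider.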
*/ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } } export interface SecurityOidcLogoutResponse { + /** A URI that points to the end session endpoint of the OpenID Connect Provider with all the parameters of the logout request as HTTP GET parameters. */ redirect: string } export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { + /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. + * It cannot be specified when *realm* is specified. + * One of *realm* or *iss* is required. */ iss?: string + /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. + * This parameter is not valid when *realm* is specified. */ login_hint?: string + /** The value used to associate a client session with an ID token and to mitigate replay attacks. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ nonce?: string + /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. + * It cannot be specified when *iss* is specified. + * One of *realm* or *iss* is required. */ realm?: string + /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ state?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } } export interface SecurityOidcPrepareAuthenticationResponse { nonce: string realm: string + /** A URI that points to the authorization endpoint of the OpenID Connect Provider with all the parameters of the authentication request as HTTP GET parameters. */ redirect: string state: string } @@ -20006,40 +33775,84 @@ export interface SecurityPutPrivilegesActions { } export interface SecurityPutPrivilegesRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh privileges?: Record> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, privileges?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, privileges?: never } } export type SecurityPutPrivilegesResponse = Record> export interface SecurityPutRoleRequest extends RequestBase { + /** The name of the role. */ name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
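+ * @example
+ * // Illustrative sketch of a minimal role definition; all names are hypothetical.
+ * await client.security.putRole({
+ *   name: 'logs_reader',
+ *   cluster: ['monitor'],
+ *   indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
+ * })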
*/ refresh?: Refresh + /** A list of application privilege entries. */ applications?: SecurityApplicationPrivileges[] + /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */ cluster?: SecurityClusterPrivilege[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: Record + /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[] + /** A list of remote indices permissions entries. + * + * NOTE: Remote indices are effective for remote clusters configured with the API key based model. + * They have no effect for remote clusters configured with the certificate based model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of remote cluster permissions entries. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] + /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */ metadata?: Metadata + /** A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ run_as?: string[] + /** Optional description of the role descriptor */ description?: string + /** Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. */ transient_metadata?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never } } export interface SecurityPutRoleResponse { + /** When an existing role is updated, `created` is set to `false`. */ role: SecurityCreatedStatus } export interface SecurityPutRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
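+ * @example
+ * // Illustrative sketch mapping a hypothetical directory group to a role.
+ * await client.security.putRoleMapping({
+ *   name: 'ldap_logs_readers',
+ *   enabled: true,
+ *   roles: ['logs_reader'],
+ *   rules: { field: { groups: 'cn=logs,ou=groups,dc=example,dc=com' } }
+ * })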
*/ refresh?: Refresh + /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */ enabled?: boolean + /** Additional metadata that helps define which roles are assigned to each user. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of role names that are granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ roles?: string[] + /** A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ role_templates?: SecurityRoleTemplate[] + /** The rules that determine which users should be matched by the mapping. + * A rule is a logical condition that is expressed by using a JSON DSL. */ rules?: SecurityRoleMappingRule run_as?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never } } export interface SecurityPutRoleMappingResponse { @@ -20048,110 +33861,266 @@ export interface SecurityPutRoleMappingResponse { } export interface SecurityPutUserRequest extends RequestBase { + /** An identifier for the user. + * + * NOTE: Usernames must be at least 1 and no more than 507 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. + * Leading or trailing whitespace is not allowed. */ username: Username + /** Valid values are `true`, `false`, and `wait_for`. + * These values have the same meaning as in the index API, but the default value for this API is true. */ refresh?: Refresh + /** The email of the user. */ email?: string | null + /** The full name of the user. */ full_name?: string | null + /** Arbitrary metadata that you want to associate with the user. */ metadata?: Metadata + /** The user's password. + * Passwords must be at least 6 characters long. + * When adding a user, one of `password` or `password_hash` is required. + * When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password */ password?: Password + /** A hash of the user's password. + * This must be produced using the same hashing algorithm as has been configured for password storage. + * For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. + * Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. + * The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ password_hash?: string + /** A set of roles the user has. + * The roles determine the user's access permissions. + * To create a user without any roles, specify an empty list (`[]`). */ roles?: string[] + /** Specifies whether the user is enabled. 
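+ * @example
+ * // Illustrative sketch; username, password, and role are hypothetical.
+ * await client.security.putUser({
+ *   username: 'jdoe',
+ *   password: 'a-long-password',
+ *   roles: ['logs_reader'],
+ *   full_name: 'Jane Doe'
+ * })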
*/ enabled?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never } } export interface SecurityPutUserResponse { + /** A successful call returns a JSON structure that shows whether the user has been created or updated. + * When an existing user is updated, `created` is set to `false`. */ created: boolean } export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate export interface SecurityQueryApiKeysApiKeyAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: SecurityQueryApiKeysApiKeyQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ filters?: SecurityQueryApiKeysApiKeyFiltersAggregation missing?: AggregationsMissingAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation } export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. 
*/ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } export interface SecurityQueryApiKeysApiKeyQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> } export interface SecurityQueryApiKeysRequest extends RequestBase { + /** Return the snapshot of the owner user's role descriptors associated with the API key. + * An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). + * An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ with_limited_by?: boolean + /** Determines whether to also retrieve the profile UID for the API key owner principal. + * If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ with_profile_uid?: boolean + /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. */ aggregations?: Record - /** @alias aggregations */ + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. 
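+ * @example
+ * // Illustrative sketch: bucket matching keys by owner. The field names follow the
+ * // queryable fields listed above; the aggregation name is hypothetical.
+ * const resp = await client.security.queryApiKeys({
+ *   query: { term: { invalidated: false } },
+ *   aggs: { keys_by_user: { terms: { field: 'username' } } },
+ *   size: 0
+ * })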
+ * @alias aggregations */ aggs?: Record + /** A query to filter which API keys to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following public information associated with an API key: `id`, `type`, `name`, + * `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + * + * NOTE: The queryable string values associated with API keys are internally mapped as keywords. + * Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. + * Such a match query is hence equivalent to a `term` query. */ query?: SecurityQueryApiKeysApiKeyQueryContainer + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer + /** The sort definition. + * Other than `id`, all public fields of an API key are eligible for sorting. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort + /** The number of hits to return. + * It must not be negative. + * The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer + /** The search after definition. */ search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } } export interface SecurityQueryApiKeysResponse { + /** The total number of API keys found. */ total: integer + /** The number of API keys returned in the response. */ count: integer + /** A list of API key information. */ api_keys: SecurityApiKey[] + /** The aggregations result, if requested. */ aggregations?: Record } export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { _sort?: SortResults + /** Name of the role. */ name: string } export interface SecurityQueryRoleRequest extends RequestBase { + /** A query to filter which roles to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with roles: `name`, `description`, `metadata`, + * `applications.application`, `applications.privileges`, and `applications.resources`. */ query?: SecurityQueryRoleRoleQueryContainer + /** The starting document offset. 
+ * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer + /** The sort definition. + * You can sort on `username`, `roles`, or `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer + /** The search after definition. */ search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never } } export interface SecurityQueryRoleResponse { + /** The total number of roles found. */ total: integer + /** The number of roles returned in the response. */ count: integer + /** A list of roles that match the query. + * The returned role format is an extension of the role definition format. + * It adds the `transient_metadata.enabled` and the `_sort` fields. + * `transient_metadata.enabled` is set to `false` in case the role is automatically disabled, for example when the role grants privileges that are not allowed by the installed license. + * `_sort` is present when the search query sorts on some field. + * It contains the array of values that have been used for sorting. */ roles: SecurityQueryRoleQueryRole[] } export interface SecurityQueryRoleRoleQueryContainer { + /** matches roles matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns roles that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns roles based on their IDs. + * This query uses role document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns roles that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all roles, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns roles that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns roles that contain terms within a provided range. */ range?: Partial> + /** Returns roles based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns roles that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns roles that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns roles that contain terms matching a wildcard pattern. 
*/ wildcard?: Partial> } @@ -20160,113 +34129,238 @@ export interface SecurityQueryUserQueryUser extends SecurityUser { } export interface SecurityQueryUserRequest extends RequestBase { + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean + /** A query to filter which users to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ query?: SecurityQueryUserUserQueryContainer + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer + /** The sort definition. + * Fields eligible for sorting are: `username`, `roles`, `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer + /** The search after definition */ search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } } export interface SecurityQueryUserResponse { + /** The total number of users found. */ total: integer + /** The number of users returned in the response. */ count: integer + /** A list of users that match the query. */ users: SecurityQueryUserQueryUser[] } export interface SecurityQueryUserUserQueryContainer { + /** Returns users based on their IDs. + * This query uses the user document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** matches users matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns users that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns users that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all users, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns users that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns users that contain terms within a provided range. */ range?: Partial> + /** Returns users based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns users that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns users that contain one or more exact terms in a provided field. 
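+ * For example (an illustrative sketch, assuming a Client instance named `client`):
+ * `await client.security.queryUser({ query: { terms: { roles: ['logs_reader'] } } })`.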
+ * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns users that contain terms matching a wildcard pattern. */ wildcard?: Partial> } export interface SecuritySamlAuthenticateRequest extends RequestBase { + /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ content: string + /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids + /** The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. */ realm?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { content?: never, ids?: never, realm?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { content?: never, ids?: never, realm?: never } } export interface SecuritySamlAuthenticateResponse { + /** The access token that was generated by Elasticsearch. */ access_token: string + /** The authenticated user's name. */ username: string + /** The amount of time (in seconds) left until the token expires. */ expires_in: integer + /** The refresh token that was generated by Elasticsearch. */ refresh_token: string + /** The name of the realm where the user was authenticated. */ realm: string } export interface SecuritySamlCompleteLogoutRequest extends RequestBase { + /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ realm: string + /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids + /** If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. */ query_string?: string + /** If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. */ content?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never } } export type SecuritySamlCompleteLogoutResponse = boolean export interface SecuritySamlInvalidateRequest extends RequestBase { + /** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */ acs?: string + /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. + * This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. + * If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. + * In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. 
+ * The client application must not attempt to parse or process the string in any way. */ query_string: string + /** The name of the SAML realm in Elasticsearch the configuration. You must specify either this parameter or the `acs` parameter. */ realm?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acs?: never, query_string?: never, realm?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acs?: never, query_string?: never, realm?: never } } export interface SecuritySamlInvalidateResponse { + /** The number of tokens that were invalidated as part of this logout. */ invalidated: integer + /** The realm name of the SAML realm in Elasticsearch that authenticated the user. */ realm: string + /** A SAML logout response as a parameter so that the user can be redirected back to the SAML IdP. */ redirect: string } export interface SecuritySamlLogoutRequest extends RequestBase { + /** The access token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ token: string + /** The refresh token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent refresh token that was received after refreshing the original access token. */ refresh_token?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } } export interface SecuritySamlLogoutResponse { + /** A URL that contains a SAML logout request as a parameter. + * You can use this URL to be redirected back to the SAML IdP and to initiate Single Logout. */ redirect: string } export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { + /** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. + * The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ acs?: string + /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. + * You must specify either this parameter or the `acs` parameter. */ realm?: string + /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. + * If the Authentication Request is signed, this value is used as part of the signature computation. */ relay_state?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } } export interface SecuritySamlPrepareAuthenticationResponse { + /** A unique identifier for the SAML Request to be stored by the caller of the API. */ id: Id + /** The name of the Elasticsearch realm that was used to construct the authentication request. */ realm: string + /** The URL to redirect the user to. 
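+ * @example
+ * // Illustrative SP-initiated login sketch; the realm name is hypothetical.
+ * const { id, redirect } = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
+ * // Persist `id` for the later samlAuthenticate call, then send the user's browser to `redirect`.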
*/ redirect: string } export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { + /** The name of the SAML realm in Elasticsearch. */ realm_name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realm_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { realm_name?: never } } export interface SecuritySamlServiceProviderMetadataResponse { + /** An XML string that contains a SAML Service Provider's metadata for the realm. */ metadata: string } export interface SecuritySuggestUserProfilesHint { + /** A list of profile UIDs to match against. */ uids?: SecurityUserProfileId[] + /** A single key-value pair to match against the labels section + * of a profile. A profile is considered matching if it matches + * at least one of the strings. */ labels?: Record } export interface SecuritySuggestUserProfilesRequest extends RequestBase { + /** A query string used to match name-related fields in user profile documents. + * Name-related fields are the user's `username`, `full_name`, and `email`. */ name?: string + /** The number of profiles to return. */ size?: long + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. + * By default, the API returns no `data` content. + * It is an error to specify `data` as both the query parameter and the request body field. */ data?: string | string[] + /** Extra search criteria to improve relevance of the suggestion result. + * Profiles matching the specified hint are ranked higher in the response. + * Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ hint?: SecuritySuggestUserProfilesHint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } } export interface SecuritySuggestUserProfilesResponse { + /** Metadata about the number of matching profiles. */ total: SecuritySuggestUserProfilesTotalUserProfiles + /** The number of milliseconds it took Elasticsearch to run the request. */ took: long + /** A list of profile documents, ordered by relevance, that match the search criteria. */ profiles: SecurityUserProfile[] } @@ -20276,33 +34370,82 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles { } export interface SecurityUpdateApiKeyRequest extends RequestBase { + /** The ID of the API key to update. */ id: Id + /** The role descriptors to assign to this API key. + * The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not.
+ * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this value fully replaces the metadata previously associated with the API key. */ metadata?: Metadata + /** The expiration time for the API key. + * By default, API keys never expire. + * This property can be omitted to leave the expiration unchanged. */ expiration?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } } export interface SecurityUpdateApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn't change because no change was detected. */ updated: boolean } export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { + /** The ID of the cross-cluster API key to update. */ id: Id + /** The access to be granted to this API key. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access: SecurityAccess + /** The expiration time for the API key. + * By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this information fully replaces metadata previously associated with the API key. */ metadata?: Metadata + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } } export interface SecurityUpdateCrossClusterApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn't change because no change was detected. */ updated: boolean } export interface SecurityUpdateSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security?: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ 'security-profile'?: SecuritySecuritySettings + /** Settings for the index used to store tokens.
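+ * @example
+ * // A minimal sketch, not authoritative usage: assumes an instantiated `client` and a
+ * // deployment where these replica settings may be updated.
+ * await client.security.updateSettings({
+ *   security: { index: { auto_expand_replicas: '0-all' } },
+ *   'security-tokens': { index: { auto_expand_replicas: '0-all' } }
+ * })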
*/ 'security-tokens'?: SecuritySecuritySettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } } export interface SecurityUpdateSettingsResponse { @@ -20310,12 +34453,30 @@ export interface SecurityUpdateSettingsResponse { } export interface SecurityUpdateUserProfileDataRequest extends RequestBase { + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId + /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber + /** Only perform the operation if the document has this primary term. */ if_primary_term?: long + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh + /** Searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ labels?: Record + /** Non-searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). + * The data object is not searchable, but can be retrieved with the get user profile API. */ data?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } } export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase @@ -20323,9 +34484,16 @@ export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase export type ShutdownType = 'restart' | 'remove' | 'replace' export interface ShutdownDeleteNodeRequest extends RequestBase { + /** The node ID of the node to be removed from the shutdown state */ node_id: NodeId + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
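+ * @example
+ * // A minimal sketch; assumes an instantiated `client` and a hypothetical node ID:
+ * await client.shutdown.deleteNode({ node_id: 'my-node-id' })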
*/ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } } export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase @@ -20350,8 +34518,14 @@ export interface ShutdownGetNodePluginsStatus { } export interface ShutdownGetNodeRequest extends RequestBase { + /** The node for which to retrieve the shutdown status */ node_id?: NodeIds + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } } export interface ShutdownGetNodeResponse { @@ -20367,37 +34541,92 @@ export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'sta export type ShutdownGetNodeShutdownType = 'remove' | 'restart' export interface ShutdownPutNodeRequest extends RequestBase { + /** The node identifier. + * This parameter is not validated against the cluster's active nodes. + * This enables you to register a node for shut down while it is offline. + * No error is thrown if you specify an invalid node ID. */ node_id: NodeId + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit + /** Valid values are restart, remove, or replace. + * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. + * Because the node is expected to rejoin the cluster, data is not migrated off of the node. + * Use remove when you need to permanently remove a node from the cluster. + * The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. + * Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. + * During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ type: ShutdownType + /** A human-readable reason that the node is being shut down. + * This field provides information for other cluster operators; it does not affect the shut down process. */ reason: string + /** Only valid if type is restart. + * Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. + * This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. + * If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ allocation_delay?: string + /** Only valid if type is replace. + * Specifies the name of the node that is replacing the node being shut down. + * Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.
+ * During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ target_node_name?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } } export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SimulateIngestIngestDocumentSimulationKeys { + /** Identifier for the document. */ _id: Id + /** Name of the index that the document would be indexed into if this were not a simulation. */ _index: IndexName + /** JSON body for the document. */ _source: Record + _version: SpecUtilsStringified + /** A list of the names of the pipelines executed on this document. */ executed_pipelines: string[] + /** A list of the fields that would be ignored at the indexing step. For example, a field whose + * value is larger than the allowed limit would make it through all of the pipelines, but + * would not be indexed into Elasticsearch. */ ignored_fields?: Record[] + /** Any error resulting from simulating ingest on this doc. This can be an error generated by + * executing a processor, or a mapping validation error when simulating indexing the resulting + * doc. */ error?: ErrorCause } export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause } export interface SimulateIngestRequest extends RequestBase { + /** The index to simulate ingesting into. + * This value can be overridden by specifying an index on each document. + * If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ index?: IndexName + /** The pipeline to use as the default pipeline. + * This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName + /** Sample documents to test in the pipeline. */ docs: IngestDocument[] + /** A map of component template names to substitute component template definition objects. */ component_template_substitutions?: Record + /** A map of index template names to substitute index template definition objects. */ index_template_substitutions?: Record mapping_addition?: MappingTypeMapping + /** Pipelines to test. + * If you don’t specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + /** All values in `querystring` will be added to the request querystring.
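+ * @example
+ * // A minimal sketch; the index, document, and substitute pipeline are hypothetical:
+ * await client.simulate.ingest({
+ *   docs: [{ _index: 'my-index', _source: { message: 'HELLO' } }],
+ *   pipeline_substitutions: {
+ *     'my-pipeline': { processors: [{ lowercase: { field: 'message' } }] }
+ *   }
+ * })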
*/ + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } } export interface SimulateIngestResponse { @@ -20409,11 +34638,19 @@ export interface SimulateIngestSimulateIngestDocumentResult { } export interface SlmConfiguration { + /** If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ ignore_unavailable?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. + * By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and indices. */ indices?: Indices + /** If true, the current global state is included in the snapshot. */ include_global_state?: boolean + /** A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions can be retrieved using the get features API. + * Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ feature_states?: string[] + /** Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. */ metadata?: Metadata + /** If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean } @@ -20438,8 +34675,11 @@ export interface SlmPolicy { } export interface SlmRetention { + /** Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ expire_after: Duration + /** Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ max_count: integer + /** Minimum number of snapshots to retain, even if the snapshots have expired. */ min_count: integer } @@ -20447,11 +34687,15 @@ export interface SlmSnapshotLifecycle { in_progress?: SlmInProgress last_failure?: SlmInvocation last_success?: SlmInvocation + /** The last time the policy was modified. */ modified_date?: DateTime modified_date_millis: EpochTime + /** The next time the policy will run. */ next_execution?: DateTime next_execution_millis: EpochTime policy: SlmPolicy + /** The version of the snapshot policy. + * Only the latest version is stored and incremented when the policy is updated.
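+ * For example, a policy that has been updated twice since it was created reports `version: 3`.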
*/ version: VersionNumber stats: SlmStatistics } @@ -20472,27 +34716,49 @@ export interface SlmStatistics { retention_timed_out?: long policy?: Id total_snapshots_deleted?: long + /** @alias total_snapshots_deleted */ snapshots_deleted?: long total_snapshot_deletion_failures?: long + /** @alias total_snapshot_deletion_failures */ snapshot_deletion_failures?: long total_snapshots_failed?: long + /** @alias total_snapshots_failed */ snapshots_failed?: long total_snapshots_taken?: long + /** @alias total_snapshots_taken */ snapshots_taken?: long } export interface SlmDeleteLifecycleRequest extends RequestBase { + /** The id of the snapshot lifecycle policy to remove */ policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { + /** The id of the snapshot lifecycle policy to be executed */ policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export interface SlmExecuteLifecycleResponse { @@ -20500,23 +34766,46 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { + /** Comma-separated list of snapshot lifecycle policies to retrieve */ policy_id?: Names + /** The period to wait for a connection to the master node. 
+ * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } } export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface SlmGetStatsResponse { @@ -20533,8 +34822,18 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export interface SlmGetStatusResponse { @@ -20542,28 +34841,64 @@ export interface SlmGetStatusResponse { } export interface SlmPutLifecycleRequest extends RequestBase { + /** The identifier for the snapshot lifecycle policy you want to create or update. */ policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** Configuration for each snapshot created by the policy. */ config?: SlmConfiguration + /** Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. */ name?: Name + /** Repository used to store snapshots created by this policy. 
This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. */ repository?: string + /** Retention rules used to retain and delete snapshots created by the policy. */ retention?: SlmRetention + /** Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. */ schedule?: WatcherCronExpression + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } } export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } } export type SlmStopResponse = AcknowledgedResponseBase @@ -20750,6 +35085,7 @@ export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepository delegate_type?: string max_number_of_snapshots?: integer read_only?: boolean + /** @alias read_only */ readonly?: boolean } @@ -20765,97 +35101,178 @@ export interface SnapshotStatus { } export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + /** Number of binary large objects (blobs) removed during cleanup. */ deleted_blobs: long + /** Number of bytes freed by cleanup operations. */ deleted_bytes: long } export interface SnapshotCleanupRepositoryRequest extends RequestBase { + /** Snapshot repository to clean up. 
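+ * @example
+ * // A minimal sketch; assumes an instantiated `client` and a registered repository:
+ * const { results } = await client.snapshot.cleanupRepository({ name: 'my_repository' })
+ * // `results.deleted_blobs` and `results.deleted_bytes` report what the cleanup reclaimed.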
*/ name: Name + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** Period to wait for a response. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export interface SnapshotCleanupRepositoryResponse { + /** Statistics for cleanup operations. */ results: SnapshotCleanupRepositoryCleanupRepositoryResults } export interface SnapshotCloneRequest extends RequestBase { + /** A repository name */ repository: Name + /** The name of the snapshot to clone from */ snapshot: Name + /** The name of the cloned snapshot to create */ target_snapshot: Name + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration indices: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } } export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { + /** Repository for the snapshot. */ repository: Name + /** Name of the snapshot. Must be unique in the repository. */ snapshot: Name + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. */ wait_for_completion?: boolean + /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. */ ignore_unavailable?: boolean + /** If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ include_global_state?: boolean + /** Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default. */ indices?: Indices + /** Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. */ feature_states?: string[] + /** Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch. */ metadata?: Metadata + /** If `true`, allows restoring a partial snapshot of indices with unavailable shards. 
Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, feature_states?: never, metadata?: never, partial?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, feature_states?: never, metadata?: never, partial?: never } } export interface SnapshotCreateResponse { + /** Equals `true` if the snapshot was accepted. Present when the request had `wait_for_completion` set to `false` */ accepted?: boolean + /** Snapshot information. Present when the request had `wait_for_completion` set to `true` */ snapshot?: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { + /** A repository name */ name: Name + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** Whether to verify the repository after creation */ verify?: boolean repository?: SnapshotRepository + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } } export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { + /** A repository name */ repository: Name + /** A comma-separated list of snapshot names */ snapshot: Name + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** If `true`, the request returns a response when the matching snapshots are all deleted. + * If `false`, the request returns a response as soon as the deletes are scheduled. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } } export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { + /** Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. */ name: Names + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** All values in `body` will be added to the request body. 
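+ * @example
+ * // A minimal sketch; unregisters all repositories matching a hypothetical wildcard
+ * // pattern (the underlying snapshot data itself is left in place):
+ * await client.snapshot.deleteRepository({ name: 'temp_repo*' })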
*/ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { + /** Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. */ repository: Name + /** Comma-separated list of snapshot names to retrieve. Also accepts wildcards (*). + * - To get information about all snapshots in a registered repository, use a wildcard (*) or _all. + * - To get information about any snapshots that are currently running, use _current. */ snapshot: Names + /** If false, the request returns an error for any snapshots that are unavailable. */ ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** If true, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. */ verbose?: boolean + /** If true, returns additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to false, meaning that this information is omitted. */ index_details?: boolean + /** If true, returns the name of each index in each snapshot. */ index_names?: boolean + /** If true, returns the repository name in each snapshot. */ include_repository?: boolean + /** Allows setting a sort order for the result. Defaults to start_time, i.e. sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort + /** Maximum number of snapshots to return. Defaults to 0 which means return all that match the request without limit. */ size?: integer + /** Sort order. Valid values are asc for ascending and desc for descending order. Defaults to asc, meaning ascending order. */ order?: SortOrder + /** Offset identifier to start pagination from as returned by the next field in the response body. */ after?: string + /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ offset?: integer + /** Value of the current sort column at which to start retrieval. Can either be a string snapshot- or repository name when sorting by snapshot or repository name, a millisecond time value or a number when sorting by index- or shard count. */ from_sort_value?: string + /** Filter snapshots by a comma-separated list of SLM policy names that snapshots belong to. Also accepts wildcards (*) and combinations of wildcards followed by exclude patterns starting with -. To include snapshots not created by an SLM policy you can use the special pattern _none that will match all snapshots without an SLM policy. */ slm_policy_filter?: Name + /** All values in `body` will be added to the request body. 
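+ * @example
+ * // A minimal sketch of a paginated snapshot listing; assumes an instantiated `client`
+ * // and a hypothetical repository name:
+ * const { snapshots, total, remaining } = await client.snapshot.get({
+ *   repository: 'my_repository',
+ *   snapshot: '*',
+ *   sort: 'start_time',
+ *   size: 10
+ * })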
*/ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never, verbose?: never, index_details?: never, index_names?: never, include_repository?: never, sort?: never, size?: never, order?: never, after?: never, offset?: never, from_sort_value?: never, slm_policy_filter?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never, verbose?: never, index_details?: never, index_names?: never, include_repository?: never, sort?: never, size?: never, order?: never, after?: never, offset?: never, from_sort_value?: never, slm_policy_filter?: never } } export interface SnapshotGetResponse { responses?: SnapshotGetSnapshotResponseItem[] snapshots?: SnapshotSnapshotInfo[] + /** The total number of snapshots that match the request when ignoring size limit or after query parameter. */ total: integer + /** The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the next field value. */ remaining: integer } @@ -20866,97 +35283,196 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { + /** A comma-separated list of repository names */ name?: Names + /** Return local information, do not retrieve the state from master node (default: false) */ local?: boolean + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } } export type SnapshotGetRepositoryResponse = Record export interface SnapshotRepositoryAnalyzeBlobDetails { + /** The name of the blob. */ name: string + /** Indicates whether the blob was overwritten while the read operations were ongoing. */ overwritten: boolean read_early: boolean + /** The position, in bytes, at which read operations completed. */ read_end: long + /** The position, in bytes, at which read operations started. */ read_start: long + /** A description of every read operation performed on the blob. */ reads: SnapshotRepositoryAnalyzeReadBlobDetails + /** The size of the blob. */ size: ByteSize + /** The size of the blob in bytes. */ size_bytes: long } export interface SnapshotRepositoryAnalyzeDetailsInfo { + /** A description of the blob that was written and read. */ blob: SnapshotRepositoryAnalyzeBlobDetails + /** The elapsed time spent overwriting the blob. + * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed?: Duration + /** The elapsed time spent overwriting the blob, in nanoseconds. + * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed_nanos?: DurationValue + /** The elapsed time spent writing the blob. */ write_elapsed: Duration + /** The elapsed time spent writing the blob, in nanoseconds. */ write_elapsed_nanos: DurationValue + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob.
*/ write_throttled: Duration + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob, in nanoseconds. */ write_throttled_nanos: DurationValue + /** The node which wrote the blob and coordinated the read operations. */ writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo } export interface SnapshotRepositoryAnalyzeReadBlobDetails { + /** Indicates whether the read operation may have started before the write operation was complete. */ before_write_complete?: boolean + /** The length of time spent reading the blob. + * If the blob was not found, this detail is omitted. */ elapsed?: Duration + /** The length of time spent reading the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ elapsed_nanos?: DurationValue + /** The length of time waiting for the first byte of the read operation to be received. + * If the blob was not found, this detail is omitted. */ first_byte_time?: Duration + /** The length of time waiting for the first byte of the read operation to be received, in nanoseconds. + * If the blob was not found, this detail is omitted. */ first_byte_time_nanos: DurationValue + /** Indicates whether the blob was found by the read operation. + * If the read was started before the write completed or the write was ended before completion, it might be false. */ found: boolean + /** The node that performed the read operation. */ node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + * If the blob was not found, this detail is omitted. */ throttled?: Duration + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ throttled_nanos?: DurationValue } export interface SnapshotRepositoryAnalyzeReadSummaryInfo { + /** The number of read operations performed in the test. */ count: integer + /** The maximum time spent waiting for the first byte of any read request to be received. */ max_wait: Duration + /** The maximum time spent waiting for the first byte of any read request to be received, in nanoseconds. */ max_wait_nanos: DurationValue + /** The total elapsed time spent on reading blobs in the test. */ total_elapsed: Duration + /** The total elapsed time spent on reading blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs or partial blobs read in the test. */ total_size: ByteSize + /** The total size of all the blobs or partial blobs read in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles. */ total_throttled: Duration + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. */ total_throttled_nanos: DurationValue + /** The total time spent waiting for the first byte of each read request to be received. */ total_wait: Duration + /** The total time spent waiting for the first byte of each read request to be received, in nanoseconds. 
*/ total_wait_nanos: DurationValue } export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { + /** The name of the repository. */ name: Name + /** The total number of blobs to write to the repository during the test. + * For realistic experiments, you should set it to at least `2000`. */ blob_count?: integer + /** The number of operations to run concurrently during the test. */ concurrency?: integer + /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. + * If false, it returns only a summary of the analysis. */ detailed?: boolean + /** The number of nodes on which to perform an early read operation while writing each blob. + * Early read operations are only rarely performed. */ early_read_node_count?: integer + /** The maximum size of a blob to be written during the test. + * For realistic experiments, you should set it to at least `2gb`. */ max_blob_size?: ByteSize + /** An upper limit on the total size of all the blobs written during the test. + * For realistic experiments, you should set it to at least `1tb`. */ max_total_data_size?: ByteSize + /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ rare_action_probability?: double + /** Indicates whether to rarely cancel writes before they complete. */ rarely_abort_writes?: boolean + /** The number of nodes on which to read a blob after writing. */ read_node_count?: integer + /** The minimum number of linearizable register operations to perform in total. + * For realistic experiments, you should set it to at least `100`. */ register_operation_count?: integer + /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. + * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. + * Note that the operations are performed concurrently so might not always happen in the same order on each run. */ seed?: integer + /** The period of time to wait for the test to complete. + * If no response is received before the timeout expires, the test is cancelled and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } } export interface SnapshotRepositoryAnalyzeResponse { + /** The number of blobs written to the repository during the test. */ blob_count: integer + /** The path in the repository under which all the blobs were written during the test. */ blob_path: string + /** The number of write operations performed concurrently during the test. */ concurrency: integer + /** The node that coordinated the analysis and performed the final cleanup. 
*/ coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The time it took to delete all the blobs in the container. */ delete_elapsed: Duration + /** The time it took to delete all the blobs in the container, in nanoseconds. */ delete_elapsed_nanos: DurationValue + /** A description of every read and write operation performed during the test. */ details: SnapshotRepositoryAnalyzeDetailsInfo + /** The limit on the number of nodes on which early read operations were performed after writing each blob. */ early_read_node_count: integer + /** A list of correctness issues detected, which is empty if the API succeeded. + * It is included to emphasize that a successful response does not guarantee correct behaviour in future. */ issues_detected: string[] + /** The time it took to retrieve a list of all the blobs in the container. */ listing_elapsed: Duration + /** The time it took to retrieve a list of all the blobs in the container, in nanoseconds. */ listing_elapsed_nanos: DurationValue + /** The limit on the size of a blob written during the test. */ max_blob_size: ByteSize + /** The limit, in bytes, on the size of a blob written during the test. */ max_blob_size_bytes: long + /** The limit on the total size of all blobs written during the test. */ max_total_data_size: ByteSize + /** The limit, in bytes, on the total size of all blobs written during the test. */ max_total_data_size_bytes: long + /** The probability of performing rare actions during the test. */ rare_action_probability: double + /** The limit on the number of nodes on which read operations were performed after writing each blob. */ read_node_count: integer + /** The name of the repository that was the subject of the analysis. */ repository: string + /** The seed for the pseudo-random number generator used to generate the operations used during the test. */ seed: long + /** A collection of statistics that summarize the results of the test. */ summary: SnapshotRepositoryAnalyzeSummaryInfo } @@ -20966,38 +35482,64 @@ export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { } export interface SnapshotRepositoryAnalyzeSummaryInfo { + /** A collection of statistics that summarize the results of the read operations in the test. */ read: SnapshotRepositoryAnalyzeReadSummaryInfo + /** A collection of statistics that summarize the results of the write operations in the test. */ write: SnapshotRepositoryAnalyzeWriteSummaryInfo } export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { + /** The number of write operations performed in the test. */ count: integer + /** The total elapsed time spent on writing blobs in the test. */ total_elapsed: Duration + /** The total elapsed time spent on writing blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs written in the test. */ total_size: ByteSize + /** The total size of all the blobs written in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle. */ total_throttled: Duration + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle, in nanoseconds.
*/ total_throttled_nanos: long } export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { + /** A repository name */ name: Names + /** Number of threads to use for reading metadata */ meta_thread_pool_concurrency?: integer + /** Number of threads to use for reading blob contents */ blob_thread_pool_concurrency?: integer + /** Number of snapshots to verify concurrently */ snapshot_verification_concurrency?: integer + /** Number of indices to verify concurrently */ index_verification_concurrency?: integer + /** Number of snapshots to verify concurrently within each index */ index_snapshot_verification_concurrency?: integer + /** Maximum permitted number of failed shard snapshots */ max_failed_shard_snapshots?: integer + /** Whether to verify the contents of individual blobs */ verify_blob_contents?: boolean + /** Rate limit for individual blob verification */ max_bytes_per_sec?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, meta_thread_pool_concurrency?: never, blob_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, index_verification_concurrency?: never, index_snapshot_verification_concurrency?: never, max_failed_shard_snapshots?: never, verify_blob_contents?: never, max_bytes_per_sec?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, meta_thread_pool_concurrency?: never, blob_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, index_verification_concurrency?: never, index_snapshot_verification_concurrency?: never, max_failed_shard_snapshots?: never, verify_blob_contents?: never, max_bytes_per_sec?: never } } export type SnapshotRepositoryVerifyIntegrityResponse = any export interface SnapshotRestoreRequest extends RequestBase { + /** A repository name */ repository: Name + /** A snapshot name */ snapshot: Name + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** Should this request wait until the operation has completed before returning */ wait_for_completion?: boolean feature_states?: string[] ignore_index_settings?: string[] @@ -21009,6 +35551,10 @@ export interface SnapshotRestoreRequest extends RequestBase { partial?: boolean rename_pattern?: string rename_replacement?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } + /** All values in `querystring` will be added to the request querystring. 
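+ * @example
+ * // A minimal sketch that restores one index under a new name; the repository and
+ * // snapshot names are hypothetical:
+ * await client.snapshot.restore({
+ *   repository: 'my_repository',
+ *   snapshot: 'snapshot_1',
+ *   indices: 'my-index',
+ *   rename_pattern: '(.+)',
+ *   rename_replacement: 'restored-$1',
+ *   wait_for_completion: true
+ * })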
*/ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } } export interface SnapshotRestoreResponse { @@ -21023,10 +35569,18 @@ export interface SnapshotRestoreSnapshotRestore { } export interface SnapshotStatusRequest extends RequestBase { + /** A repository name */ repository?: Name + /** A comma-separated list of snapshot names */ snapshot?: Names + /** Whether to ignore unavailable snapshots. Defaults to `false`, which means a SnapshotMissingException is thrown */ ignore_unavailable?: boolean + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } } export interface SnapshotStatusResponse { @@ -21038,9 +35592,16 @@ export interface SnapshotVerifyRepositoryCompactNodeInfo { } export interface SnapshotVerifyRepositoryRequest extends RequestBase { + /** A repository name */ name: Name + /** Explicit operation timeout for connection to master node */ master_timeout?: Duration + /** Explicit operation timeout */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } } export interface SnapshotVerifyRepositoryResponse { @@ -21055,7 +35616,12 @@ export interface SqlColumn { } export type SqlRow = any[] export interface SqlClearCursorRequest extends RequestBase { + /** Cursor to clear. */ cursor: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { cursor?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { cursor?: never } } export interface SqlClearCursorResponse { @@ -21063,78 +35629,191 @@ export interface SqlClearCursorResponse { } export interface SqlDeleteAsyncRequest extends RequestBase { + /** The identifier for the search. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { + /** The identifier for the search. */ id: Id + /** The separator for CSV results. + * The API supports this parameter only for CSV responses. */ delimiter?: string + /** The format for the response. + * You must specify a format using this parameter or the `Accept` HTTP header. + * If you specify both, the API uses this parameter.
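
// Illustrative sketch (not part of the generated types): kicking off a restore without
// blocking, then checking progress via the status API. The repository, snapshot, and
// index names are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function restoreAndCheck (): Promise<void> {
  await client.snapshot.restore({
    repository: 'my-repo',
    snapshot: 'nightly-001',
    wait_for_completion: false, // return immediately; poll status instead
    indices: 'logs-*',
    rename_pattern: '(.+)',
    rename_replacement: 'restored-$1'
  })
  const status = await client.snapshot.status({
    repository: 'my-repo',
    snapshot: 'nightly-001',
    ignore_unavailable: true // avoid a SnapshotMissingException for a bad name
  })
  console.log(status.snapshots?.[0]?.state)
}
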
*/ format?: string + /** The retention period for the search and its results. + * It defaults to the `keep_alive` period for the original SQL search. */ keep_alive?: Duration + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } } export interface SqlGetAsyncResponse { + /** Identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial: boolean + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The values for the search results. */ rows: SqlRow[] } export interface SqlGetAsyncStatusRequest extends RequestBase { + /** The identifier for the search. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface SqlGetAsyncStatusResponse { + /** The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete the search and its results, even if the search is still running. */ expiration_time_in_millis: EpochTime + /** The identifier for the search. */ id: string + /** If `true`, the search is still running. + * If `false`, the search has finished. */ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. */ is_partial: boolean + /** The timestamp, in milliseconds since the Unix epoch, when the search started. + * The API returns this property only for running searches. */ start_time_in_millis: EpochTime + /** The HTTP status code for the search. + * The API returns this property only for completed searches. 
*/ completion_status?: uint } export interface SqlQueryRequest extends RequestBase { + /** The format for the response. + * You can also specify a format using the `Accept` HTTP header. + * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat + /** If `true`, the response has partial results when there are shard request timeouts or shard failures. + * If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean + /** The default catalog (cluster) for queries. + * If unspecified, the queries execute on the data in the local cluster only. */ catalog?: string + /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. + * The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ columnar?: boolean + /** The cursor used to retrieve a set of paginated results. + * If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. + * It ignores other request body parameters. */ cursor?: string + /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer + /** If `false`, the API returns an exception when encountering multiple values for a field. + * If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ field_multi_value_leniency?: boolean + /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer + /** If `true`, the search can run on frozen indices. */ index_using_frozen?: boolean + /** The retention period for an async or saved synchronous search. */ keep_alive?: Duration + /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. + * If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ keep_on_completion?: boolean + /** The minimum retention period for the scroll cursor. + * After this time period, a pagination request might fail because the scroll cursor is no longer available. + * Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ page_timeout?: Duration + /** The values for parameters in the query. */ params?: any[] + /** The SQL query to run. */ query?: string + /** The timeout before the request fails. */ request_timeout?: Duration + /** One or more runtime fields for the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields + /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. + * If the search doesn't finish within this period, the search becomes async. + * + * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } } export interface SqlQueryResponse { + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id?: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running?: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial?: boolean + /** The values for the search results. */ rows: SqlRow[] } export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' export interface SqlTranslateRequest extends RequestBase { + /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer + /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer + /** The SQL query to run. */ query: string + /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never } } export interface SqlTranslateResponse { @@ -21147,87 +35826,151 @@ export interface SqlTranslateResponse { } export interface SslCertificatesCertificateInformation { + /** If the path refers to a container file (a JKS keystore or a PKCS#12 file), it is the alias of the certificate. + * Otherwise, it is null.
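
// Illustrative sketch (not part of the generated types): running a SQL search, reading
// the first page, and releasing the cursor early instead of paging to the end. The
// index pattern and query are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function sqlFirstPage (): Promise<void> {
  const page = await client.sql.query({
    query: 'SELECT "@timestamp", message FROM "logs-*" ORDER BY "@timestamp"',
    fetch_size: 100
  })
  for (const row of page.rows) console.log(row)
  // abandoning pagination? clear the cursor to free server-side resources
  if (page.cursor != null) {
    await client.sql.clearCursor({ cursor: page.cursor })
  }
}
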
*/ alias: string | null + /** The ISO formatted date of the certificate's expiry (not-after) date. */ expiry: DateTime + /** The format of the file. + * Valid values include `jks`, `PKCS12`, and `PEM`. */ format: string + /** Indicates whether Elasticsearch has access to the private key for this certificate. */ has_private_key: boolean + /** The Distinguished Name of the certificate's issuer. */ issuer?: string + /** The path to the certificate, as configured in the `elasticsearch.yml` file. */ path: string + /** The hexadecimal representation of the certificate's serial number. */ serial_number: string + /** The Distinguished Name of the certificate's subject. */ subject_dn: string } export interface SslCertificatesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } } export type SslCertificatesResponse = SslCertificatesCertificateInformation[] export interface SynonymsSynonymRule { + /** The identifier for the synonym rule. + * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ id?: Id + /** The synonyms that make up the synonym rule, in Solr format. */ synonyms: SynonymsSynonymString } export interface SynonymsSynonymRuleRead { + /** Synonym rule identifier */ id: Id + /** Synonyms, in Solr format, that make up the synonym rule. See https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 */ synonyms: SynonymsSynonymString } export type SynonymsSynonymString = string export interface SynonymsSynonymsUpdateResult { + /** The update operation result. */ result: Result + /** Updating synonyms in a synonym set reloads the associated analyzers. + * This information is the analyzers reloading result. */ reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult } export interface SynonymsDeleteSynonymRequest extends RequestBase { + /** The synonyms set identifier to delete. */ id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set to update. */ set_id: Id + /** The ID of the synonym rule to delete. */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never } } export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult export interface SynonymsGetSynonymRequest extends RequestBase { + /** The synonyms set identifier to retrieve. */ id: Id + /** The starting offset for synonym rules to retrieve. */ from?: integer + /** The max number of synonym rules to retrieve. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { id?: never, from?: never, size?: never } } export interface SynonymsGetSynonymResponse { + /** The total number of synonym rules that the synonyms set contains. */ count: integer + /** Synonym rule details. */ synonyms_set: SynonymsSynonymRuleRead[] } export interface SynonymsGetSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set to retrieve the synonym rule from. */ set_id: Id + /** The ID of the synonym rule to retrieve. */ rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never } } export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead export interface SynonymsGetSynonymsSetsRequest extends RequestBase { + /** The starting offset for synonyms sets to retrieve. */ from?: integer + /** The maximum number of synonyms sets to retrieve. */ size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never } } export interface SynonymsGetSynonymsSetsResponse { + /** The total number of synonyms sets defined. */ count: integer + /** The identifier and total number of defined synonym rules for each synonyms set. */ results: SynonymsGetSynonymsSetsSynonymsSetItem[] } export interface SynonymsGetSynonymsSetsSynonymsSetItem { + /** Synonyms set identifier */ synonyms_set: Id + /** Number of synonym rules that the synonym set contains */ count: integer } export interface SynonymsPutSynonymRequest extends RequestBase { + /** The ID of the synonyms set to be created or updated. */ id: Id + /** The synonym rule definitions for the synonyms set. */ synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, synonyms_set?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, synonyms_set?: never } } export interface SynonymsPutSynonymResponse { @@ -21236,9 +35979,16 @@ export interface SynonymsPutSynonymResponse { } export interface SynonymsPutSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set. */ set_id: Id + /** The ID of the synonym rule to be updated or created. */ rule_id: Id + /** The synonym rule information definition, which must be in Solr format. */ synonyms: SynonymsSynonymString + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, synonyms?: never } } export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult @@ -21263,6 +36013,10 @@ export interface TasksTaskInfo { action: string cancelled?: boolean cancellable: boolean + /** Human-readable text that identifies the particular request that the task is performing. + * For example, it might identify the search request being performed by a search task.
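
// Illustrative sketch (not part of the generated types): creating a synonyms set, then
// updating one rule in place. Identifiers are hypothetical; note that a rule update
// reloads the analyzers that use the set (see `reload_analyzers_details` above).
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function manageSynonyms (): Promise<void> {
  await client.synonyms.putSynonym({
    id: 'ecommerce-synonyms',
    synonyms_set: [
      { id: 'rule-1', synonyms: 'laptop, notebook' },
      { synonyms: 'tv => television' } // rule ID is generated by Elasticsearch
    ]
  })
  const updated = await client.synonyms.putSynonymRule({
    set_id: 'ecommerce-synonyms',
    rule_id: 'rule-1',
    synonyms: 'laptop, notebook, portable computer'
  })
  console.log(updated.result, updated.reload_analyzers_details)
}
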
+ * Other kinds of tasks have different descriptions, like `_reindex` which has the source and the destination, or `_bulk` which just has the number of requests and the destination indices. + * Many requests will have only an empty description because more detailed information about the request is not easily available or particularly helpful in identifying the request. */ description?: string headers: Record id: long @@ -21270,6 +36024,10 @@ export interface TasksTaskInfo { running_time?: Duration running_time_in_nanos: DurationValue start_time_in_millis: EpochTime + /** The internal status of the task, which varies from task to task. + * The format also varies. + * While the goal is to keep the status for a particular task consistent from version to version, this is not always possible because sometimes the implementation changes. + * Fields might be removed from the status for a particular request, so any parsing you do of the status might break in minor releases. */ status?: any type: string parent_task_id?: TaskId @@ -21280,24 +36038,44 @@ export type TasksTaskInfos = TasksTaskInfo[] | Record + /** Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if + * `group_by` was set to `parents`. */ tasks?: TasksTaskInfos } export interface TasksCancelRequest extends RequestBase { + /** The task identifier. */ task_id?: TaskId + /** A comma-separated list or wildcard expression of actions that is used to limit the request. */ actions?: string | string[] + /** A comma-separated list of node IDs or names that is used to limit the request. */ nodes?: string[] + /** A parent task ID that is used to limit the tasks. */ parent_task_id?: string + /** If `true`, the request blocks until all found tasks are complete. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never } } export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { + /** The task identifier. */ task_id: Id + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never } } export interface TasksGetResponse { @@ -21308,13 +36086,31 @@ export interface TasksGetResponse { } export interface TasksListRequest extends RequestBase { + /** A comma-separated list or wildcard expression of actions used to limit the request. + * For example, you can use `cluster:*` to retrieve all cluster-related tasks. */ actions?: string | string[] + /** If `true`, the response includes detailed information about the running tasks. + * This information is useful to distinguish tasks from each other but is more costly to run.
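
// Illustrative sketch (not part of the generated types): listing detailed reindex tasks
// grouped by parent, then cancelling one. The action filter is a real task action name;
// the task ID shown is a made-up example value.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function inspectTasks (): Promise<void> {
  const listed = await client.tasks.list({
    detailed: true, // include per-task descriptions and status (more costly)
    group_by: 'parents',
    actions: 'indices:data/write/reindex'
  })
  console.log(listed.tasks)
  // cancellation is cooperative: the task stops at its next cancellation checkpoint
  await client.tasks.cancel({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345' })
}
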
*/ detailed?: boolean + /** A key that is used to group tasks in the response. + * The task lists can be grouped either by nodes or by parent tasks. */ group_by?: TasksGroupBy + /** A comma-separated list of node IDs or names that is used to limit the returned information. */ nodes?: NodeIds + /** A parent task identifier that is used to limit returned information. + * To return all tasks, omit this parameter or use a value of `-1`. + * If the parent task is not found, the API does not return a 404 response code. */ parent_task_id?: Id + /** The period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its information. + * However, timed out nodes are included in the `node_failures` property. */ timeout?: Duration + /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never } } export type TasksListResponse = TasksTaskListResponseBase @@ -21341,20 +36137,103 @@ export interface TextStructureTopHit { } export interface TextStructureFindFieldStructureRequest extends RequestBase { + /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string + /** The number of documents to include in the structural analysis. + * The minimum value is 2. */ documents_to_sample?: uint + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. + * The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ ecs_compatibility?: TextStructureEcsCompatibilityType + /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
*/ explain?: boolean + /** The field that should be analyzed. */ field: Field + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern + /** The name of the index that contains the analyzed field. */ index: IndexName + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+ * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } } export interface TextStructureFindFieldStructureResponse { @@ -21376,18 +36255,97 @@ export interface TextStructureFindFieldStructureResponse { } export interface TextStructureFindMessageStructureRequest extends RequestBase { + /** If the format is `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string + /** If the format is `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
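
// Illustrative sketch (not part of the generated types): analyzing an already-indexed
// field with the structure finder. Index and field names are hypothetical, and the
// textStructure.findFieldStructure method assumes a client version that ships this API.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function analyzeField (): Promise<void> {
  const structure = await client.textStructure.findFieldStructure({
    index: 'raw-syslog',
    field: 'message',
    documents_to_sample: 1000,
    ecs_compatibility: 'v1', // prefer ECS field names in any generated Grok pattern
    explain: true
  })
  console.log(structure) // detected format, mappings, and possibly a Grok pattern
}
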
*/ ecs_compatibility?: TextStructureEcsCompatibilityType + /** If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). 
+ * Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string + /** The list of messages you want to analyze. */ messages: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never } } export interface TextStructureFindMessageStructureResponse { @@ -21409,44 +36367,160 @@ export interface TextStructureFindMessageStructureResponse { } export interface TextStructureFindStructureRequest { + /** The text's character set. + * It must be a character set that is supported by the JVM that Elasticsearch uses. + * For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. + * If this parameter is not specified, the structure finder chooses an appropriate character set. */ charset?: string + /** If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`.
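
// Illustrative sketch (not part of the generated types): analyzing a few raw log lines
// with the message-structure finder. The messages are made up; as the docs above note,
// `timestamp_field` is left unset because no explicit `grok_pattern` is supplied.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function analyzeMessages (): Promise<void> {
  const result = await client.textStructure.findMessageStructure({
    messages: [
      '2024-05-01T12:00:00Z INFO starting service',
      '2024-05-01T12:00:01Z WARN disk usage at 91%',
      '2024-05-01T12:00:02Z INFO service started'
    ],
    format: 'semi_structured_text'
  })
  console.log(result) // detected mappings, grok pattern, ingest pipeline, and so on
}
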
+ * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them. */ ecs_compatibility?: string + /** If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. + * If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ explain?: boolean + /** The high level structure of the text. + * Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: string + /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern + /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. + * If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */ has_header_row?: boolean + /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. + * If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ line_merge_size_limit?: uint + /** The number of lines to include in the structural analysis, starting from the beginning of the text. + * The minimum is 2. + * If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + * + * NOTE: The number of lines and the variation of the lines affect the speed of the analysis. + * For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. + * If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ lines_to_sample?: uint + /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`).
+ * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string + /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field + /** The Java time format of the timestamp field in the text. + * + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a `.`, `,` or `:`. + * Spacing and punctuation are also permitted, with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string text_files?: TJsonDocument[] + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never } } export interface TextStructureFindStructureResponse { + /** The character encoding used to parse the text. */ charset: string has_header_row?: boolean + /** For UTF character encodings, it indicates whether the text begins with a byte order marker. */ has_byte_order_marker: boolean + /** Valid values include `ndjson`, `xml`, `delimited`, and `semi_structured_text`. */ format: string + /** The most common values of each field, plus basic numeric statistics for the numeric `page_count` field. + * This information may provide clues that the data needs to be cleaned or transformed prior to use by other Elastic Stack functionality. */ field_stats: Record + /** The first two messages in the text verbatim. + * This may help diagnose parse errors or accidental uploads of the wrong text. */ sample_start: string + /** The number of distinct messages the lines contained. + * For NDJSON, this value is the same as `num_lines_analyzed`. + * For other text formats, messages can span several lines. */ num_messages_analyzed: integer + /** Some suitable mappings for an index into which the data could be ingested. */ mappings: MappingTypeMapping quote?: string delimiter?: string + /** If a timestamp format is detected that does not include a timezone, `need_client_timezone` is `true`. + * The server that parses the text must therefore be told the correct timezone by the client. */ need_client_timezone: boolean + /** The number of lines of the text that were analyzed. */ num_lines_analyzed: integer + /** If `format` is `delimited`, the `column_names` field lists the column names in the order they appear in the sample. */ column_names?: string[] explanation?: string[] grok_pattern?: GrokPattern multiline_start_pattern?: string exclude_lines_pattern?: string + /** The Java time formats recognized in the time fields. + * Elasticsearch mappings and ingest pipelines use this format. */ java_timestamp_formats?: string[] + /** Information that is used to tell Logstash how to parse timestamps. */ joda_timestamp_formats?: string[] + /** The field considered most likely to be the primary timestamp of each document. */ timestamp_field?: Field should_trim_fields?: boolean ingest_pipeline: IngestPipelineConfig @@ -21464,9 +36538,18 @@ export interface TextStructureTestGrokPatternMatchedText { } export interface TextStructureTestGrokPatternRequest extends RequestBase { + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. 
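
// Illustrative sketch (not part of the generated types): checking a candidate Grok
// pattern against sample lines before using it in an ingest pipeline. The pattern and
// sample text are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function tryGrokPattern (): Promise<void> {
  const result = await client.textStructure.testGrokPattern({
    grok_pattern: '%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}',
    text: [
      '2024-05-01T12:00:00Z INFO starting service',
      'this line will not match'
    ],
    ecs_compatibility: 'v1'
  })
  console.log(result.matches) // one entry per input line, with any extracted fields
}
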
*/ ecs_compatibility?: string + /** The Grok pattern to run on the text. */ grok_pattern: GrokPattern + /** The lines of text to run the Grok pattern on. */ text: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never } } export interface TextStructureTestGrokPatternResponse { @@ -21474,18 +36557,35 @@ export interface TextStructureTestGrokPatternResponse { } export interface TransformDestination { + /** The destination index for the transform. The mappings of the destination index are deduced based on the source + * fields when possible. If alternate mappings are required, use the create index API prior to starting the + * transform. */ index?: IndexName + /** The unique identifier for an ingest pipeline. */ pipeline?: string } export interface TransformLatest { + /** Specifies the date field that is used to identify the latest documents. */ sort: Field + /** Specifies an array of one or more fields that are used to group the data. */ unique_key: Field[] } export interface TransformPivot { + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. */ aggregations?: Record + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. + * @alias aggregations */ aggs?: Record + /** Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are + * currently supported: date histogram, geotile grid, histogram, terms. */ group_by?: Record } @@ -21497,53 +36597,115 @@ export interface TransformPivotGroupByContainer { } export interface TransformRetentionPolicy { + /** The date field that is used to calculate the age of the document. */ field: Field + /** Specifies the maximum age of a document in the destination index. Documents that are older than the configured + * value are removed from the destination index. */ max_age: Duration } export interface TransformRetentionPolicyContainer { + /** Specifies that the transform uses a time field to set the retention policy. */ time?: TransformRetentionPolicy } export interface TransformSettings { + /** Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align + * checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the + * transform config. As a result, fewer document updates in the destination index will be performed, thus improving + * overall performance. */ align_checkpoints?: boolean + /** Defines if dates in the output should be written as an ISO formatted string or as millis since epoch. epoch_millis was + * the default for transforms created before version 7.11.
For compatible output set this value to `true`. */ dates_as_epoch_millis?: boolean + /** Specifies whether the transform should deduce the destination index mappings from the transform configuration. */ deduce_mappings?: boolean + /** Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a + * wait time between search requests. The default value is null, which disables throttling. */ docs_per_second?: float + /** Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker + * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the + * maximum is `65,536`. */ max_page_search_size?: integer + /** If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case + * of an error, which means the transform never fails. Setting the number of retries other than infinite fails in + * validation. */ unattended?: boolean } export interface TransformSource { + /** The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an + * array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for + * example, `["my-index-*", "my-other-index-*"]`). For remote indices, use the syntax `"remote_name:index_name"`. If + * any indices are in remote clusters, then the master node and at least one transform node must have the `remote_cluster_client` node role. */ index: Indices + /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer + /** Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data + * nodes, including remote nodes, must be 7.12 or later. */ runtime_mappings?: MappingRuntimeFields } export interface TransformSyncContainer { + /** Specifies that the transform uses a time field to synchronize the source and destination indices. */ time?: TransformTimeSync } export interface TransformTimeSync { + /** The time delay between the current time and the latest input data time. */ delay?: Duration + /** The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field + * that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it + * accounts for data transmission delays. */ field: Field } export interface TransformDeleteTransformRequest extends RequestBase { + /** Identifier for the transform. */ transform_id: Id + /** If this value is `false`, the transform must be stopped before it can be deleted. If `true`, the transform is + * deleted regardless of its current state. */ force?: boolean + /** If this value is `true`, the destination index is deleted together with the transform. If `false`, the destination + * index will not be deleted. */ delete_dest_index?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, force?: never, delete_dest_index?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
export interface TransformGetTransformRequest extends RequestBase { + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the + * `<transform_id>`. */ transform_id?: Names + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of transforms. */ from?: integer + /** Specifies the maximum number of transforms to obtain. */ size?: integer + /** Excludes fields that were automatically added when creating the + * transform. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } } export interface TransformGetTransformResponse { @@ -21552,19 +36714,28 @@ export interface TransformGetTransformResponse { } export interface TransformGetTransformTransformSummary { + /** The security privileges that the transform uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the transform, this property is omitted. */ authorization?: MlTransformAuthorization + /** The time the transform was created. */ create_time?: EpochTime create_time_string?: DateTime + /** Free text description of the transform. */ description?: string + /** The destination for the transform. */ dest: ReindexDestination frequency?: Duration id: Id latest?: TransformLatest + /** The pivot method transforms the data by aggregating and grouping it. */ pivot?: TransformPivot retention_policy?: TransformRetentionPolicyContainer + /** Defines optional transform settings. */ settings?: TransformSettings + /** The source of the data for the transform. */ source: TransformSource + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString _meta?: Metadata } @@ -21589,11 +36760,30 @@ export interface TransformGetTransformStatsCheckpointing { }
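A possible call for the get-transform shape above, under the same assumed `client`; the wildcard plus `allow_no_match` keeps the request from returning a 404 when nothing matches:

const { count, transforms } = await client.transform.getTransform({
  transform_id: 'ecommerce-*', // wildcard expression
  allow_no_match: true,        // succeed even with zero matches
  exclude_generated: true,     // strip auto-added fields so the config can be reused
  size: 100
})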
export interface TransformGetTransformStatsRequest extends RequestBase { + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the + * `<transform_id>`. */ transform_id: Names + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean + /** Skips the specified number of transforms. */ from?: long + /** Specifies the maximum number of transforms to obtain. */ size?: long + /** Controls the time to wait for the stats. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never } } export interface TransformGetTransformStatsResponse { @@ -21602,10 +36792,15 @@ export interface TransformGetTransformStatsResponse { } export interface TransformGetTransformStatsTransformHealthIssue { + /** The type of the issue. */ type: string + /** A description of the issue. */ issue: string + /** Details about the issue. */ details?: string + /** Number of times this issue has occurred since it started. */ count: integer + /** The timestamp this issue occurred for the first time. */ first_occurrence?: EpochTime first_occurence_string?: DateTime } @@ -21642,6 +36837,7 @@ export interface TransformGetTransformStatsTransformStats { checkpointing: TransformGetTransformStatsCheckpointing health?: TransformGetTransformStatsTransformStatsHealth id: Id + /** @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes reason?: string state: string @@ -21650,21 +36846,46 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformGetTransformStatsTransformStatsHealth { status: HealthStatus + /** If a non-healthy status is returned, contains a list of issues of the transform. */ issues?: TransformGetTransformStatsTransformHealthIssue[] } export interface TransformPreviewTransformRequest extends RequestBase { + /** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform + * configuration details in the request body. */ transform_id?: Id + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The destination for the transform. */ dest?: TransformDestination + /** Free text description of the transform. */ description?: string + /** The interval between checks for changes in the source indices when the + * transform is running continuously. Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration + /** The pivot method transforms the data by aggregating and grouping it. + * These objects define the group by fields and the aggregation to reduce + * the data. */ pivot?: TransformPivot + /** The source of the data for the transform. */ source?: TransformSource + /** Defines optional transform settings. */ settings?: TransformSettings + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** Defines a retention policy for the transform.
Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer + /** The latest method transforms the data by finding the latest document for + * each unique key. */ latest?: TransformLatest + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } } export interface TransformPreviewTransformResponse { @@ -21673,69 +36894,176 @@ export interface TransformPreviewTransformResponse { } export interface TransformPutTransformRequest extends RequestBase { + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id + /** When the transform is created, a series of validations occur to ensure its success. For example, there is a + * check for the existence of the source indices and a check that the destination index is not part of the source + * index pattern. You can use this parameter to skip the checks, for example when the source index does not exist + * until after the transform is created. The validations are always run when you start the transform, however, with + * the exception of privilege checks. */ defer_validation?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The destination for the transform. */ dest: TransformDestination + /** Free text description of the transform. */ description?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. Also + * determines the retry interval in the event of transient failures while the transform is searching or indexing. + * The minimum value is `1s` and the maximum is `1h`. */ frequency?: Duration + /** The latest method transforms the data by finding the latest document for each unique key. */ latest?: TransformLatest + /** Defines optional transform metadata. */ _meta?: Metadata + /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields + * and the aggregation to reduce the data. */ pivot?: TransformPivot + /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the + * destination index. */ retention_policy?: TransformRetentionPolicyContainer + /** Defines optional transform settings. */ settings?: TransformSettings + /** The source of the data for the transform. */ source: TransformSource + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** All values in `body` will be added to the request body. 
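 * @example
 * // A hedged sketch, not part of the generated spec: create a latest-style
 * // transform with the JS client; `client` and all index/field names are assumptions.
 * await client.transform.putTransform({
 *   transform_id: 'latest-order-per-customer',
 *   source: { index: 'kibana_sample_data_ecommerce' },
 *   dest: { index: 'latest-orders' },
 *   latest: { unique_key: ['customer_id'], sort: 'order_date' },
 *   sync: { time: { field: 'order_date', delay: '60s' } },
 *   frequency: '5m'
 * })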
*/ + body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never } } export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id + /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform + * must be stopped before it can be reset. */ force?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never } } export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformScheduleNowTransformRequest extends RequestBase { + /** Identifier for the transform. */ transform_id: Id + /** Controls the time to wait for the scheduling to take place */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never } } export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase +export interface TransformSetUpgradeModeRequest extends RequestBase { + /** When `true`, it enables `upgrade_mode` which temporarily halts all + * transform tasks and prohibits new transform tasks from + * starting. */ + enabled?: boolean + /** The time to wait for the request to be completed. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } +} + +export type TransformSetUpgradeModeResponse = AcknowledgedResponseBase + export interface TransformStartTransformRequest extends RequestBase { + /** Identifier for the transform. */ transform_id: Id + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration + /** Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. 
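 * @example
 * // A hedged sketch (assumed `client`): start a continuous transform, only
 * // picking up entities that changed in the last 30 days.
 * await client.transform.startTransform({ transform_id: 'customer-spend', from: 'now-30d' })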
*/ from?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never } } export type TransformStartTransformResponse = AcknowledgedResponseBase export interface TransformStopTransformRequest extends RequestBase { + /** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. + * To stop all transforms, use `_all` or `*` as the identifier. */ transform_id: Name + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; + * contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there + * are only partial matches. + * + * If it is true, the API returns a successful acknowledgement message when there are no matches. When there are + * only partial matches, the API stops the appropriate transforms. + * + * If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean + /** If it is true, the API forcefully stops the transforms. */ force?: boolean + /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the + * timeout expires, the request returns a timeout exception. However, the request continues processing and + * eventually moves the transform to a STOPPED state. */ timeout?: Duration + /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, + * the transform stops as soon as possible. */ wait_for_checkpoint?: boolean + /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns + * immediately and the indexer is stopped asynchronously in the background. */ wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } } export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { + /** Identifier for the transform. */ transform_id: Id + /** When true, deferrable validations are not run. This behavior may be + * desired if the source index does not exist until after the transform is + * created. */ defer_validation?: boolean + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration + /** The destination for the transform. */ dest?: TransformDestination + /** Free text description of the transform. */ description?: string + /** The interval between checks for changes in the source indices when the + * transform is running continuously. Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. 
The minimum value is 1s and the maximum is 1h. */ frequency?: Duration + /** Defines optional transform metadata. */ _meta?: Metadata + /** The source of the data for the transform. */ source?: TransformSource + /** Defines optional transform settings. */ settings?: TransformSettings + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer | null + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } } export interface TransformUpdateTransformResponse { @@ -21756,13 +37084,23 @@ export interface TransformUpdateTransformResponse { } export interface TransformUpgradeTransformsRequest extends RequestBase { + /** When true, the request checks for updates but does not run them. */ dry_run?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and + * returns an error. */ timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { dry_run?: never, timeout?: never } } export interface TransformUpgradeTransformsResponse { + /** The number of transforms that need to be upgraded. */ needs_update: integer + /** The number of transforms that don’t require upgrading. */ no_action: integer + /** The number of transforms that have been upgraded. */ updated: integer } @@ -21943,7 +37281,9 @@ export interface WatcherExecutionState { export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' export interface WatcherExecutionThreadPool { + /** The largest size of the execution thread pool, which indicates the largest number of concurrent running watches. */ max_size: long + /** The number of watches that were triggered and are currently queued. */ queue_size: long } @@ -22071,6 +37411,7 @@ export interface WatcherPagerDutyEvent { client?: string client_url?: string contexts?: WatcherPagerDutyContext[] + /** @alias contexts */ context?: WatcherPagerDutyContext[] description: string event_type?: WatcherPagerDutyEventType @@ -22158,9 +37499,14 @@ export interface WatcherSearchInputRequestDefinition { export interface WatcherSearchTemplateRequestBody { explain?: boolean + /** ID of the search template to use. If no source is specified, + * this parameter is required. */ id?: Id params?: Record profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. Also supports Mustache variables. 
If no id is specified, this + * parameter is required. */ source?: string } @@ -22283,8 +37629,15 @@ export interface WatcherWebhookResult { } export interface WatcherAckWatchRequest extends RequestBase { + /** The watch identifier. */ watch_id: Name + /** A comma-separated list of the action identifiers to acknowledge. + * If you omit this parameter, all of the actions of the watch are acknowledged. */ action_id?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never, action_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { watch_id?: never, action_id?: never } } export interface WatcherAckWatchResponse { @@ -22292,7 +37645,12 @@ export interface WatcherAckWatchResponse { } export interface WatcherActivateWatchRequest extends RequestBase { + /** The watch identifier. */ watch_id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { watch_id?: never } } export interface WatcherActivateWatchResponse { @@ -22300,7 +37658,12 @@ export interface WatcherActivateWatchResponse { } export interface WatcherDeactivateWatchRequest extends RequestBase { + /** The watch identifier. */ watch_id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { watch_id?: never } } export interface WatcherDeactivateWatchResponse { @@ -22308,7 +37671,12 @@ export interface WatcherDeactivateWatchResponse { } export interface WatcherDeleteWatchRequest extends RequestBase { + /** The watch identifier. */ id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface WatcherDeleteWatchResponse { @@ -22318,19 +37686,36 @@ export interface WatcherDeleteWatchResponse { } export interface WatcherExecuteWatchRequest extends RequestBase { + /** The watch identifier. */ id?: Id + /** Defines whether the watch runs in debug mode. */ debug?: boolean + /** Determines how to handle the watch actions as part of the watch execution. */ action_modes?: Record + /** When present, the watch uses this object as a payload instead of executing its own input. */ alternative_input?: Record + /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */ ignore_condition?: boolean + /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. + * In addition, the status of the watch is updated, possibly throttling subsequent runs. + * This can also be specified as an HTTP parameter. */ record_execution?: boolean simulated_actions?: WatcherSimulatedActions + /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */ trigger_data?: WatcherScheduleTriggerEvent + /** When present, this watch is used instead of the one specified in the request. 
+ * This watch is not persisted to the index and `record_execution` cannot be set. */ watch?: WatcherWatch + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } } export interface WatcherExecuteWatchResponse { + /** The watch record identifier as it would be stored in the `.watcher-history` index. */ _id: Id + /** The watch record document as it would be stored in the `.watcher-history` index. */ watch_record: WatcherExecuteWatchWatchRecord } @@ -22349,7 +37734,13 @@ export interface WatcherExecuteWatchWatchRecord { } export interface WatcherGetSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface WatcherGetSettingsResponse { @@ -22357,7 +37748,12 @@ export interface WatcherGetSettingsResponse { } export interface WatcherGetWatchRequest extends RequestBase { + /** The watch identifier. */ id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } } export interface WatcherGetWatchResponse { @@ -22371,19 +37767,40 @@ export interface WatcherGetWatchResponse { } export interface WatcherPutWatchRequest extends RequestBase { + /** The identifier for the watch. */ id: Id + /** The initial state of the watch. + * The default value is `true`, which means the watch is active by default. */ active?: boolean + /** Only update the watch if the last operation that has changed the watch has the specified primary term. */ if_primary_term?: long + /** Only update the watch if the last operation that has changed the watch has the specified sequence number. */ if_seq_no?: SequenceNumber + /** Explicit version number for concurrency control. */ version?: VersionNumber + /** The list of actions that will be run if the condition matches. */ actions?: Record + /** The condition that defines if the actions should be run. */ condition?: WatcherConditionContainer + /** The input that loads the data for the watch. */ input?: WatcherInputContainer + /** Metadata JSON that will be copied into the history entries. */ metadata?: Metadata + /** The minimum time between actions being run. + * The default is 5 seconds. + * This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. + * If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request.
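 * @example
 * // A hedged sketch (assumed `client` and names): a watch that is checked every
 * // minute but runs its action at most once per 15 minutes.
 * await client.watcher.putWatch({
 *   id: 'cluster-health-watch',
 *   trigger: { schedule: { interval: '1m' } },
 *   input: { simple: { note: 'placeholder payload' } },
 *   condition: { always: {} },
 *   actions: { log_it: { logging: { text: 'watch fired' } } },
 *   throttle_period: '15m'
 * })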
*/ throttle_period?: Duration + /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period_in_millis?: DurationValue + /** The transform that processes the watch payload to prepare it for the watch actions. */ transform?: TransformContainer + /** The trigger that defines when the watch should run. */ trigger?: WatcherTriggerContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } } export interface WatcherPutWatchResponse { @@ -22395,27 +37812,51 @@ export interface WatcherPutWatchResponse { } export interface WatcherQueryWatchesRequest extends RequestBase { + /** The offset from the first result to fetch. + * It must be non-negative. */ from?: integer + /** The number of hits to return. + * It must be non-negative. */ size?: integer + /** A query that filters the watches to be returned. */ query?: QueryDslQueryContainer + /** One or more fields used to sort the search results. */ sort?: Sort + /** Retrieve the next page of hits using a set of sort values from the previous page. */ search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } } export interface WatcherQueryWatchesResponse { + /** The total number of watches found. */ count: integer + /** A list of watches based on the `from`, `size`, or `search_after` request body parameters. */ watches: WatcherQueryWatch[] } export interface WatcherStartRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { + /** Defines which additional metrics are included in the response. */ metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + /** Defines whether stack traces are generated for each watch that is running. */ emit_stacktraces?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, emit_stacktraces?: never } + /** All values in `querystring` will be added to the request querystring. 
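 * @example
 * // A hedged sketch (assumed `client`): inspect the watches currently running,
 * // including a stack trace for each.
 * const stats = await client.watcher.stats({ metric: 'current_watches', emit_stacktraces: true })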
*/ + querystring?: { [key: string]: any } & { metric?: never, emit_stacktraces?: never } } export interface WatcherStatsResponse { @@ -22426,24 +37867,42 @@ export interface WatcherStatsResponse { } export interface WatcherStatsWatchRecordQueuedStats { + /** The time the watch was run. + * This is just before the input is being run. */ execution_time: DateTime } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + /** The current watch execution phase. */ execution_phase: WatcherExecutionPhase + /** The time the watch was triggered by the trigger engine. */ triggered_time: DateTime executed_actions?: string[] watch_id: Id + /** The watch record identifier. */ watch_record_id: Id } export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' export interface WatcherStatsWatcherNodeStats { + /** The current executing watches metric gives insight into the watches that are currently being executed by Watcher. + * Additional information is shared per watch that is currently executing. + * This information includes the `watch_id`, the time its execution started and its current execution phase. + * To include this metric, the `metric` option should be set to `current_watches` or `_all`. + * In addition you can also specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being run. + * These stack traces can give you more insight into an execution of a watch. */ current_watches?: WatcherStatsWatchRecordStats[] execution_thread_pool: WatcherExecutionThreadPool + /** Watcher moderates the execution of watches such that their execution won't put too much pressure on the node and its resources. + * If too many watches trigger concurrently and there isn't enough capacity to run them all, some of the watches are queued, waiting for the current running watches to finish. + * The queued watches metric gives insight into these queued watches. + * + * To include this metric, the `metric` option should include `queued_watches` or `_all`. */ queued_watches?: WatcherStatsWatchRecordQueuedStats[] + /** The number of watches currently registered. */ watch_count: long + /** The current state of Watcher. */ watcher_state: WatcherStatsWatcherState node_id: Id } @@ -22451,16 +37910,31 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } } export type WatcherStopResponse = AcknowledgedResponseBase
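Hedged usage sketches for the stop and update-settings shapes around this point, under an assumed `client`:

// Stop the Watcher service; resolves to an acknowledged response.
await client.watcher.stop({ master_timeout: '30s' })

// Tune the .watches index settings (see WatcherUpdateSettingsRequest below).
await client.watcher.updateSettings({ 'index.auto_expand_replicas': '0-4' })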
export interface WatcherUpdateSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration 'index.auto_expand_replicas'?: string 'index.number_of_replicas'?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } } export interface WatcherUpdateSettingsResponse { @@ -22486,8 +37960,10 @@ export interface XpackInfoFeatures { data_streams: XpackInfoFeature data_tiers: XpackInfoFeature enrich: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ enterprise_search: XpackInfoFeature eql: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ esql: XpackInfoFeature frozen_indices: XpackInfoFeature graph: XpackInfoFeature @@ -22504,9 +37980,11 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ universal_profiling: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ archive: XpackInfoFeature } @@ -22524,9 +38002,18 @@ export interface XpackInfoNativeCodeInformation { } export interface XpackInfoRequest extends RequestBase { + /** A comma-separated list of the information categories to include in the response. + * For example, `build,license,features`. */ categories?: XpackInfoXPackCategory[] + /** If this param is used, it must be set to true. */ accept_enterprise?: boolean + /** Defines whether additional human-readable information is included in the response. + * In particular, it adds descriptions and a tag line. */ human?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } } export interface XpackInfoResponse { @@ -22697,6 +38184,7 @@ export interface XpackUsageJobUsage { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record + /** Job usage statistics. The `_all` entry is always present and gathers statistics for all jobs. */ jobs: Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs @@ -22823,7 +38311,14 @@ export interface XpackUsageRealmCache { } export interface XpackUsageRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring.
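 * @example
 * // A hedged sketch (assumed `client`): fetch feature usage and never time out
 * // waiting for the master node.
 * const usage = await client.xpack.usage({ master_timeout: '-1' })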
*/ + querystring?: { [key: string]: any } & { master_timeout?: never } } export interface XpackUsageResponse { @@ -22993,9 +38488,20 @@ export interface SpecUtilsAdditionalProperty { } export interface SpecUtilsCommonQueryParameters { + /** When set to `true` Elasticsearch will include the full stack trace of errors + * when they occur. */ error_trace?: boolean + /** Comma-separated list of filters in dot notation which reduce the response + * returned by Elasticsearch. */ filter_path?: string | string[] + /** When set to `true` will return statistics in a format suitable for humans. + * For example `"exists_time": "1h"` for humans and + * `"exists_time_in_millis": 3600000` for computers. When disabled the human + * readable values will be omitted. This makes sense for responses being consumed + * only by machines. */ human?: boolean + /** If set to `true` the returned JSON will be "pretty-formatted". Use + * this option for debugging only. */ pretty?: boolean } @@ -23003,7 +38509,12 @@ export interface SpecUtilsOverloadOf { } export interface SpecUtilsCommonCatQueryParameters { + /** Specifies the format to return the columnar data in; can be set to + * `text`, `json`, `cbor`, `yaml`, or `smile`. */ format?: string + /** When set to `true` will output available columns. This option + * can't be combined with any other query string option. */ help?: boolean + /** When set to `true` will enable verbose output. */ v?: boolean } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts deleted file mode 100644 index 92340c78d..000000000 --- a/src/api/typesWithBodyKey.ts +++ /dev/null @@ -1,23653 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint-disable @typescript-eslint/array-type */ -/* eslint-disable @typescript-eslint/no-empty-interface */ -/* eslint-disable @typescript-eslint/no-unused-vars */ - -/** - * We are still working on this type, it will arrive soon. - * If it's critical for you, please open an issue.
- * https://github.com/elastic/elasticsearch-js - */ -export type TODO = Record - -export interface BulkCreateOperation extends BulkWriteOperation { -} - -export interface BulkDeleteOperation extends BulkOperationBase { -} - -export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed' - -export interface BulkIndexOperation extends BulkWriteOperation { -} - -export interface BulkOperationBase { - _id?: Id - _index?: IndexName - routing?: Routing - if_primary_term?: long - if_seq_no?: SequenceNumber - version?: VersionNumber - version_type?: VersionType -} - -export interface BulkOperationContainer { - index?: BulkIndexOperation - create?: BulkCreateOperation - update?: BulkUpdateOperation - delete?: BulkDeleteOperation -} - -export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' - -export interface BulkRequest extends RequestBase { - index?: IndexName - include_source_on_error?: boolean - list_executed_pipelines?: boolean - pipeline?: string - refresh?: Refresh - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - require_alias?: boolean - require_data_stream?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */ - body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] -} - -export interface BulkResponse { - errors: boolean - items: Partial>[] - took: long - ingest_took?: long -} - -export interface BulkResponseItem { - _id?: string | null - _index: string - status: integer - failure_store?: BulkFailureStoreStatus - error?: ErrorCause - _primary_term?: long - result?: string - _seq_no?: SequenceNumber - _shards?: ShardStatistics - _version?: VersionNumber - forced_refresh?: boolean - get?: InlineGet> -} - -export interface BulkUpdateAction { - detect_noop?: boolean - doc?: TPartialDocument - doc_as_upsert?: boolean - script?: Script | string - scripted_upsert?: boolean - _source?: SearchSourceConfig - upsert?: TDocument -} - -export interface BulkUpdateOperation extends BulkOperationBase { - require_alias?: boolean - retry_on_conflict?: integer -} - -export interface BulkWriteOperation extends BulkOperationBase { - dynamic_templates?: Record - pipeline?: string - require_alias?: boolean -} - -export interface ClearScrollRequest extends RequestBase { - scroll_id?: ScrollIds - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - scroll_id?: ScrollIds - } -} - -export interface ClearScrollResponse { - succeeded: boolean - num_freed: integer -} - -export interface ClosePointInTimeRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - id: Id - } -} - -export interface ClosePointInTimeResponse { - succeeded: boolean - num_freed: integer -} - -export interface CountRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - lenient?: boolean - min_score?: double - preference?: string - routing?: Routing - terminate_after?: long - q?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - query?: QueryDslQueryContainer - } -} - -export interface CountResponse { - count: long - _shards: ShardStatistics -} - -export interface CreateRequest extends RequestBase { - id: Id - index: IndexName - include_source_on_error?: boolean - pipeline?: string - refresh?: Refresh - require_alias?: boolean - require_data_stream?: boolean - routing?: Routing - timeout?: Duration - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards - /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead. */ - body?: TDocument -} - -export type CreateResponse = WriteResponseBase - -export interface DeleteRequest extends RequestBase { - id: Id - index: IndexName - if_primary_term?: long - if_seq_no?: SequenceNumber - refresh?: Refresh - routing?: Routing - timeout?: Duration - version?: VersionNumber - version_type?: VersionType - wait_for_active_shards?: WaitForActiveShards -} - -export type DeleteResponse = WriteResponseBase - -export interface DeleteByQueryRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - conflicts?: Conflicts - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - from?: long - ignore_unavailable?: boolean - lenient?: boolean - preference?: string - refresh?: boolean - request_cache?: boolean - requests_per_second?: float - routing?: Routing - q?: string - scroll?: Duration - scroll_size?: long - search_timeout?: Duration - search_type?: SearchType - slices?: Slices - sort?: string[] - stats?: string[] - terminate_after?: long - timeout?: Duration - version?: boolean - wait_for_active_shards?: WaitForActiveShards - wait_for_completion?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - max_docs?: long - query?: QueryDslQueryContainer - slice?: SlicedScroll - } -} - -export interface DeleteByQueryResponse { - batches?: long - deleted?: long - failures?: BulkIndexByScrollFailure[] - noops?: long - requests_per_second?: float - retries?: Retries - slice_id?: integer - task?: TaskId - throttled?: Duration - throttled_millis?: DurationValue - throttled_until?: Duration - throttled_until_millis?: DurationValue - timed_out?: boolean - took?: DurationValue - total?: long - version_conflicts?: long -} - -export interface DeleteByQueryRethrottleRequest extends RequestBase { - task_id: TaskId - requests_per_second?: float -} - -export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase - -export interface DeleteScriptRequest extends RequestBase { - id: Id - master_timeout?: Duration - timeout?: Duration -} - -export type DeleteScriptResponse = AcknowledgedResponseBase - -export interface ExistsRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type ExistsResponse = boolean - -export interface ExistsSourceRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type ExistsSourceResponse = boolean - -export interface ExplainExplanation { - description: string - details: ExplainExplanationDetail[] - value: float -} - -export interface ExplainExplanationDetail { - description: string - details?: ExplainExplanationDetail[] - value: float -} - -export interface ExplainRequest extends RequestBase { - id: Id - index: IndexName - analyzer?: string - analyze_wildcard?: boolean - default_operator?: QueryDslOperator - df?: string - lenient?: boolean - preference?: string - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - q?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - query?: QueryDslQueryContainer - } -} - -export interface ExplainResponse { - _index: IndexName - _id: Id - matched: boolean - explanation?: ExplainExplanationDetail - get?: InlineGet -} - -export interface FieldCapsFieldCapability { - aggregatable: boolean - indices?: Indices - meta?: Metadata - non_aggregatable_indices?: Indices - non_searchable_indices?: Indices - searchable: boolean - type: string - metadata_field?: boolean - time_series_dimension?: boolean - time_series_metric?: MappingTimeSeriesMetricType - non_dimension_indices?: IndexName[] - metric_conflicts_indices?: IndexName[] -} - -export interface FieldCapsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_unmapped?: boolean - filters?: string - types?: string[] - include_empty_fields?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ - body?: { - fields?: Fields - index_filter?: QueryDslQueryContainer - runtime_mappings?: MappingRuntimeFields - } -} - -export interface FieldCapsResponse { - indices: Indices - fields: Record> -} - -export interface GetGetResult { - _index: IndexName - fields?: Record - _ignored?: string[] - found: boolean - _id: Id - _primary_term?: long - _routing?: string - _seq_no?: SequenceNumber - _source?: TDocument - _version?: VersionNumber -} - -export interface GetRequest extends RequestBase { - id: Id - index: IndexName - force_synthetic_source?: boolean - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - stored_fields?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type GetResponse = GetGetResult - -export interface GetScriptRequest extends RequestBase { - id: Id - master_timeout?: Duration -} - -export interface GetScriptResponse { - _id: Id - found: boolean - script?: StoredScript -} - -export interface GetScriptContextContext { - methods: GetScriptContextContextMethod[] - name: Name -} - -export interface GetScriptContextContextMethod { - name: Name - return_type: string - params: GetScriptContextContextMethodParam[] -} - -export interface GetScriptContextContextMethodParam { - name: Name - type: string -} - -export interface GetScriptContextRequest extends RequestBase { -} - -export interface GetScriptContextResponse { - contexts: GetScriptContextContext[] -} - -export interface GetScriptLanguagesLanguageContext { - contexts: string[] - language: ScriptLanguage -} - -export interface GetScriptLanguagesRequest extends RequestBase { -} - -export interface GetScriptLanguagesResponse { - language_contexts: GetScriptLanguagesLanguageContext[] - types_allowed: string[] -} - -export interface GetSourceRequest extends RequestBase { - id: Id - index: IndexName - preference?: string - realtime?: boolean - refresh?: boolean - routing?: Routing - _source?: SearchSourceConfigParam - _source_excludes?: Fields - _source_includes?: Fields - version?: VersionNumber - version_type?: VersionType -} - -export type GetSourceResponse = TDocument - -export interface HealthReportBaseIndicator { - status: HealthReportIndicatorHealthStatus - symptom: string - impacts?: HealthReportImpact[] - diagnosis?: HealthReportDiagnosis[] -} - -export interface HealthReportDataStreamLifecycleDetails { - stagnating_backing_indices_count: integer - total_backing_indices_in_error: integer - stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] -} - -export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { - details?: HealthReportDataStreamLifecycleDetails -} - -export interface HealthReportDiagnosis { - id: string - action: string - affected_resources: HealthReportDiagnosisAffectedResources - cause: string - help_url: string -} - -export interface HealthReportDiagnosisAffectedResources { - indices?: Indices - nodes?: HealthReportIndicatorNode[] - slm_policies?: string[] - feature_states?: string[] - snapshot_repositories?: string[] -} - -export interface HealthReportDiskIndicator extends HealthReportBaseIndicator { - details?: HealthReportDiskIndicatorDetails -} - -export interface HealthReportDiskIndicatorDetails { - indices_with_readonly_block: long - nodes_with_enough_disk_space: long - nodes_over_high_watermark: long - nodes_over_flood_stage_watermark: long - nodes_with_unknown_disk_status: long -} - -export interface 
HealthReportIlmIndicator extends HealthReportBaseIndicator { - details?: HealthReportIlmIndicatorDetails -} - -export interface HealthReportIlmIndicatorDetails { - ilm_status: LifecycleOperationMode - policies: long - stagnating_indices: integer -} - -export interface HealthReportImpact { - description: string - id: string - impact_areas: HealthReportImpactArea[] - severity: integer -} - -export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' - -export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' | 'unavailable' - -export interface HealthReportIndicatorNode { - name: string | null - node_id: string | null -} - -export interface HealthReportIndicators { - master_is_stable?: HealthReportMasterIsStableIndicator - shards_availability?: HealthReportShardsAvailabilityIndicator - disk?: HealthReportDiskIndicator - repository_integrity?: HealthReportRepositoryIntegrityIndicator - data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator - ilm?: HealthReportIlmIndicator - slm?: HealthReportSlmIndicator - shards_capacity?: HealthReportShardsCapacityIndicator -} - -export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator { - details?: HealthReportMasterIsStableIndicatorDetails -} - -export interface HealthReportMasterIsStableIndicatorClusterFormationNode { - name?: string - node_id: string - cluster_formation_message: string -} - -export interface HealthReportMasterIsStableIndicatorDetails { - current_master: HealthReportIndicatorNode - recent_masters: HealthReportIndicatorNode[] - exception_fetching_history?: HealthReportMasterIsStableIndicatorExceptionFetchingHistory - cluster_formation?: HealthReportMasterIsStableIndicatorClusterFormationNode[] -} - -export interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory { - message: string - stack_trace: string -} - -export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator { - details?: HealthReportRepositoryIntegrityIndicatorDetails -} - -export interface HealthReportRepositoryIntegrityIndicatorDetails { - total_repositories?: long - corrupted_repositories?: long - corrupted?: string[] -} - -export interface HealthReportRequest extends RequestBase { - feature?: string | string[] - timeout?: Duration - verbose?: boolean - size?: integer -} - -export interface HealthReportResponse { - cluster_name: string - indicators: HealthReportIndicators - status?: HealthReportIndicatorHealthStatus -} - -export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator { - details?: HealthReportShardsAvailabilityIndicatorDetails -} - -export interface HealthReportShardsAvailabilityIndicatorDetails { - creating_primaries: long - creating_replicas: long - initializing_primaries: long - initializing_replicas: long - restarting_primaries: long - restarting_replicas: long - started_primaries: long - started_replicas: long - unassigned_primaries: long - unassigned_replicas: long -} - -export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator { - details?: HealthReportShardsCapacityIndicatorDetails -} - -export interface HealthReportShardsCapacityIndicatorDetails { - data: HealthReportShardsCapacityIndicatorTierDetail - frozen: HealthReportShardsCapacityIndicatorTierDetail -} - -export interface HealthReportShardsCapacityIndicatorTierDetail { - max_shards_in_cluster: integer - current_used_shards?: integer -} - -export interface HealthReportSlmIndicator extends 
-export interface HealthReportSlmIndicator extends HealthReportBaseIndicator {
-  details?: HealthReportSlmIndicatorDetails
-}
-
-export interface HealthReportSlmIndicatorDetails {
-  slm_status: LifecycleOperationMode
-  policies: long
-  unhealthy_policies?: HealthReportSlmIndicatorUnhealthyPolicies
-}
-
-export interface HealthReportSlmIndicatorUnhealthyPolicies {
-  count: long
-  invocations_since_last_success?: Record<string, long>
-}
-
-export interface HealthReportStagnatingBackingIndices {
-  index_name: IndexName
-  first_occurrence_timestamp: long
-  retry_count: integer
-}
-
-export interface IndexRequest<TDocument = unknown> extends RequestBase {
-  id?: Id
-  index: IndexName
-  if_primary_term?: long
-  if_seq_no?: SequenceNumber
-  include_source_on_error?: boolean
-  op_type?: OpType
-  pipeline?: string
-  refresh?: Refresh
-  routing?: Routing
-  timeout?: Duration
-  version?: VersionNumber
-  version_type?: VersionType
-  wait_for_active_shards?: WaitForActiveShards
-  require_alias?: boolean
-  require_data_stream?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'document' instead. */
-  body?: TDocument
-}
-
-export type IndexResponse = WriteResponseBase
-
-export interface InfoRequest extends RequestBase {
-}
-
-export interface InfoResponse {
-  cluster_name: Name
-  cluster_uuid: Uuid
-  name: Name
-  tagline: string
-  version: ElasticsearchVersionInfo
-}
-
-export interface KnnSearchRequest extends RequestBase {
-  index: Indices
-  routing?: Routing
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    _source?: SearchSourceConfig
-    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-    stored_fields?: Fields
-    fields?: Fields
-    filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-    knn: KnnSearchQuery
-  }
-}
-
-export interface KnnSearchResponse<TDocument = unknown> {
-  took: long
-  timed_out: boolean
-  _shards: ShardStatistics
-  hits: SearchHitsMetadata<TDocument>
-  fields?: Record<string, any>
-  max_score?: double
-}
-
-export interface KnnSearchQuery {
-  field: Field
-  query_vector: QueryVector
-  k: integer
-  num_candidates: integer
-}
-
-export interface MgetMultiGetError {
-  error: ErrorCause
-  _id: Id
-  _index: IndexName
-}
-
-export interface MgetOperation {
-  _id: Id
-  _index?: IndexName
-  routing?: Routing
-  _source?: SearchSourceConfig
-  stored_fields?: Fields
-  version?: VersionNumber
-  version_type?: VersionType
-}
-
-export interface MgetRequest extends RequestBase {
-  index?: IndexName
-  force_synthetic_source?: boolean
-  preference?: string
-  realtime?: boolean
-  refresh?: boolean
-  routing?: Routing
-  _source?: SearchSourceConfigParam
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  stored_fields?: Fields
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs?: MgetOperation[]
-    ids?: Ids
-  }
-}
-
-export interface MgetResponse<TDocument = unknown> {
-  docs: MgetResponseItem<TDocument>[]
-}
-
-export type MgetResponseItem<TDocument = unknown> = GetGetResult<TDocument> | MgetMultiGetError
-
-export interface MsearchMultiSearchItem<TDocument = unknown> extends SearchResponseBody<TDocument> {
-  status?: integer
-}
-
-export interface MsearchMultiSearchResult<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
-  took: long
-  responses: MsearchResponseItem<TDocument>[]
-}
-
-export interface MsearchMultisearchBody {
-  aggregations?: Record<string, AggregationsAggregationContainer>
-  aggs?: Record<string, AggregationsAggregationContainer>
-  collapse?: SearchFieldCollapse
-  query?: QueryDslQueryContainer
-  explain?: boolean
-  ext?: Record<string, any>
-  stored_fields?: Fields
-  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-  knn?: KnnSearch | KnnSearch[]
-  from?: integer
-  highlight?: SearchHighlight
-  indices_boost?: Partial<Record<IndexName, double>>[]
-  min_score?: double
-  post_filter?: QueryDslQueryContainer
-  profile?: boolean
-  rescore?: SearchRescore | SearchRescore[]
-  script_fields?: Record<string, ScriptField>
-  search_after?: SortResults
-  size?: integer
-  sort?: Sort
-  _source?: SearchSourceConfig
-  fields?: (QueryDslFieldAndFormat | Field)[]
-  terminate_after?: long
-  stats?: string[]
-  timeout?: string
-  track_scores?: boolean
-  track_total_hits?: SearchTrackHits
-  version?: boolean
-  runtime_mappings?: MappingRuntimeFields
-  seq_no_primary_term?: boolean
-  pit?: SearchPointInTimeReference
-  suggest?: SearchSuggester
-}
-
-export interface MsearchMultisearchHeader {
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  index?: Indices
-  preference?: string
-  request_cache?: boolean
-  routing?: Routing
-  search_type?: SearchType
-  ccs_minimize_roundtrips?: boolean
-  allow_partial_search_results?: boolean
-  ignore_throttled?: boolean
-}
-
-export interface MsearchRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  ccs_minimize_roundtrips?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  include_named_queries_score?: boolean
-  max_concurrent_searches?: long
-  max_concurrent_shard_requests?: long
-  pre_filter_shard_size?: long
-  rest_total_hits_as_int?: boolean
-  routing?: Routing
-  search_type?: SearchType
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. */
-  body?: MsearchRequestItem[]
-}
-
-export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody
-
-export type MsearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>
-
-export type MsearchResponseItem<TDocument = unknown> = MsearchMultiSearchItem<TDocument> | ErrorResponseBase
-
-export interface MsearchTemplateRequest extends RequestBase {
-  index?: Indices
-  ccs_minimize_roundtrips?: boolean
-  max_concurrent_searches?: long
-  search_type?: SearchType
-  rest_total_hits_as_int?: boolean
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'search_templates' instead. */
-  body?: MsearchTemplateRequestItem[]
-}
-
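`MsearchRequestItem` encodes the NDJSON convention of the multi-search API in the type system: headers and bodies alternate in one flat array rather than nesting. A sketch (assuming `estypes` is re-exported by the package, as in the 8.x client; index names are illustrative):

```ts
import { estypes } from '@elastic/elasticsearch'

// Header/body pairs, flattened: item 0 targets item 1, item 2 targets item 3.
const searches: estypes.MsearchRequestItem[] = [
  { index: 'posts' },                                     // MsearchMultisearchHeader
  { query: { match: { title: 'typescript' } }, size: 5 }, // MsearchMultisearchBody
  { index: 'logs-*', expand_wildcards: 'open' },
  { query: { match_all: {} }, size: 10 }
]
```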
-export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig
-
-export type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>
-
-export interface MsearchTemplateTemplateConfig {
-  explain?: boolean
-  id?: Id
-  params?: Record<string, any>
-  profile?: boolean
-  source?: string
-}
-
-export interface MtermvectorsOperation {
-  _id?: Id
-  _index?: IndexName
-  doc?: any
-  fields?: Fields
-  field_statistics?: boolean
-  filter?: TermvectorsFilter
-  offsets?: boolean
-  payloads?: boolean
-  positions?: boolean
-  routing?: Routing
-  term_statistics?: boolean
-  version?: VersionNumber
-  version_type?: VersionType
-}
-
-export interface MtermvectorsRequest extends RequestBase {
-  index?: IndexName
-  fields?: Fields
-  field_statistics?: boolean
-  offsets?: boolean
-  payloads?: boolean
-  positions?: boolean
-  preference?: string
-  realtime?: boolean
-  routing?: Routing
-  term_statistics?: boolean
-  version?: VersionNumber
-  version_type?: VersionType
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs?: MtermvectorsOperation[]
-    ids?: Id[]
-  }
-}
-
-export interface MtermvectorsResponse {
-  docs: MtermvectorsTermVectorsResult[]
-}
-
-export interface MtermvectorsTermVectorsResult {
-  _id?: Id
-  _index: IndexName
-  _version?: VersionNumber
-  took?: long
-  found?: boolean
-  term_vectors?: Record<Field, TermvectorsTermVector>
-  error?: ErrorCause
-}
-
-export interface OpenPointInTimeRequest extends RequestBase {
-  index: Indices
-  keep_alive: Duration
-  ignore_unavailable?: boolean
-  preference?: string
-  routing?: Routing
-  expand_wildcards?: ExpandWildcards
-  allow_partial_search_results?: boolean
-  max_concurrent_shard_requests?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    index_filter?: QueryDslQueryContainer
-  }
-}
-
-export interface OpenPointInTimeResponse {
-  _shards: ShardStatistics
-  id: Id
-}
-
-export interface PingRequest extends RequestBase {
-}
-
-export type PingResponse = boolean
-
-export interface PutScriptRequest extends RequestBase {
-  id: Id
-  context?: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    script: StoredScript
-  }
-}
-
-export type PutScriptResponse = AcknowledgedResponseBase
-
-export interface RankEvalDocumentRating {
-  _id: Id
-  _index: IndexName
-  rating: integer
-}
-
-export interface RankEvalRankEvalHit {
-  _id: Id
-  _index: IndexName
-  _score: double
-}
-
-export interface RankEvalRankEvalHitItem {
-  hit: RankEvalRankEvalHit
-  rating?: double | null
-}
-
-export interface RankEvalRankEvalMetric {
-  precision?: RankEvalRankEvalMetricPrecision
-  recall?: RankEvalRankEvalMetricRecall
-  mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank
-  dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain
-  expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank
-}
-
-export interface RankEvalRankEvalMetricBase {
-  k?: integer
-}
-
-export interface RankEvalRankEvalMetricDetail {
-  metric_score: double
-  unrated_docs: RankEvalUnratedDocument[]
-  hits: RankEvalRankEvalHitItem[]
-  metric_details: Record<string, Record<string, any>>
-}
-
-export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase {
-  normalize?: boolean
-}
-
-export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase {
-  maximum_relevance: integer
-}
-
-export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold {
-}
-
-export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold {
-  ignore_unlabeled?: boolean
-}
-
-export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase {
-  relevant_rating_threshold?: integer
-}
-
-export interface RankEvalRankEvalMetricRecall extends RankEvalRankEvalMetricRatingTreshold {
-}
-
-export interface RankEvalRankEvalQuery {
-  query: QueryDslQueryContainer
-  size?: integer
-}
-
-export interface RankEvalRankEvalRequestItem {
-  id: Id
-  request?: RankEvalRankEvalQuery | QueryDslQueryContainer
-  ratings: RankEvalDocumentRating[]
-  template_id?: Id
-  params?: Record<string, any>
-}
-
-export interface RankEvalRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  search_type?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    requests: RankEvalRankEvalRequestItem[]
-    metric?: RankEvalRankEvalMetric
-  }
-}
-
-export interface RankEvalResponse {
-  metric_score: double
-  details: Record<Id, RankEvalRankEvalMetricDetail>
-  failures: Record<string, any>
-}
-
-export interface RankEvalUnratedDocument {
-  _id: Id
-  _index: IndexName
-}
-
-export interface ReindexDestination {
-  index: IndexName
-  op_type?: OpType
-  pipeline?: string
-  routing?: Routing
-  version_type?: VersionType
-}
-
-export interface ReindexRemoteSource {
-  connect_timeout?: Duration
-  headers?: Record<string, string>
-  host: Host
-  username?: Username
-  password?: Password
-  socket_timeout?: Duration
-}
-
-export interface ReindexRequest extends RequestBase {
-  refresh?: boolean
-  requests_per_second?: float
-  scroll?: Duration
-  slices?: Slices
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  wait_for_completion?: boolean
-  require_alias?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    conflicts?: Conflicts
-    dest: ReindexDestination
-    max_docs?: long
-    script?: Script | string
-    size?: long
-    source: ReindexSource
-  }
-}
-
-export interface ReindexResponse {
-  batches?: long
-  created?: long
-  deleted?: long
-  failures?: BulkIndexByScrollFailure[]
-  noops?: long
-  retries?: Retries
-  requests_per_second?: float
-  slice_id?: integer
-  task?: TaskId
-  throttled_millis?: EpochTime<UnitMillis>
-  throttled_until_millis?: EpochTime<UnitMillis>
-  timed_out?: boolean
-  took?: DurationValue<UnitMillis>
-  total?: long
-  updated?: long
-  version_conflicts?: long
-}
-
-export interface ReindexSource {
-  index: Indices
-  query?: QueryDslQueryContainer
-  remote?: ReindexRemoteSource
-  size?: integer
-  slice?: SlicedScroll
-  sort?: Sort
-  _source?: Fields
-  runtime_mappings?: MappingRuntimeFields
-}
-
-export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode {
-  tasks: Record<TaskId, ReindexRethrottleReindexTask>
-}
-
-export interface ReindexRethrottleReindexStatus {
-  batches: long
-  created: long
-  deleted: long
-  noops: long
-  requests_per_second: float
-  retries: Retries
-  throttled?: Duration
-  throttled_millis: DurationValue<UnitMillis>
-  throttled_until?: Duration
-  throttled_until_millis: DurationValue<UnitMillis>
-  total: long
-  updated: long
-  version_conflicts: long
-}
-
-export interface ReindexRethrottleReindexTask {
-  action: string
-  cancellable: boolean
-  description: string
-  id: long
-  node: Name
-  running_time_in_nanos: DurationValue<UnitNanos>
-  start_time_in_millis: EpochTime<UnitMillis>
-  status: ReindexRethrottleReindexStatus
-  type: string
-  headers: HttpHeaders
-}
-
-export interface ReindexRethrottleRequest extends RequestBase {
-  task_id: Id
-  requests_per_second?: float
-}
-
-export interface ReindexRethrottleResponse {
-  nodes: Record<string, ReindexRethrottleReindexNode>
-}
-
-export interface RenderSearchTemplateRequest extends RequestBase {
-  id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    id?: Id
-    file?: string
-    params?: Record<string, any>
-    source?: string
-  }
-}
-
-export interface RenderSearchTemplateResponse {
-  template_output: Record<string, any>
-}
-
-export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field'
-
-export interface ScriptsPainlessExecutePainlessContextSetup {
-  document: any
-  index: IndexName
-  query?: QueryDslQueryContainer
-}
-
-export interface ScriptsPainlessExecuteRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    context?: ScriptsPainlessExecutePainlessContext
-    context_setup?: ScriptsPainlessExecutePainlessContextSetup
-    script?: Script | string
-  }
-}
-
-export interface ScriptsPainlessExecuteResponse<TResult = unknown> {
-  result: TResult
-}
-
-export interface ScrollRequest extends RequestBase {
-  scroll_id?: ScrollId
-  rest_total_hits_as_int?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    scroll?: Duration
-    scroll_id: ScrollId
-  }
-}
-
-export type ScrollResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>
-
-export interface SearchRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  allow_partial_search_results?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  batched_reduce_size?: long
-  ccs_minimize_roundtrips?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  include_named_queries_score?: boolean
-  lenient?: boolean
-  max_concurrent_shard_requests?: long
-  min_compatible_shard_node?: VersionString
-  preference?: string
-  pre_filter_shard_size?: long
-  request_cache?: boolean
-  routing?: Routing
-  scroll?: Duration
-  search_type?: SearchType
-  suggest_field?: Field
-  suggest_mode?: SuggestMode
-  suggest_size?: long
-  suggest_text?: string
-  typed_keys?: boolean
-  rest_total_hits_as_int?: boolean
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  q?: string
-  force_synthetic_source?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record<string, AggregationsAggregationContainer>
-    /** @alias aggregations */
-    aggs?: Record<string, AggregationsAggregationContainer>
-    collapse?: SearchFieldCollapse
-    explain?: boolean
-    ext?: Record<string, any>
-    from?: integer
-    highlight?: SearchHighlight
-    track_total_hits?: SearchTrackHits
-    indices_boost?: Partial<Record<IndexName, double>>[]
-    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-    knn?: KnnSearch | KnnSearch[]
-    rank?: RankContainer
-    min_score?: double
-    post_filter?: QueryDslQueryContainer
-    profile?: boolean
-    query?: QueryDslQueryContainer
-    rescore?: SearchRescore | SearchRescore[]
-    retriever?: RetrieverContainer
-    script_fields?: Record<string, ScriptField>
-    search_after?: SortResults
-    size?: integer
-    slice?: SlicedScroll
-    sort?: Sort
-    _source?: SearchSourceConfig
-    fields?: (QueryDslFieldAndFormat | Field)[]
-    suggest?: SearchSuggester
-    terminate_after?: long
-    timeout?: string
-    track_scores?: boolean
-    version?: boolean
-    seq_no_primary_term?: boolean
-    stored_fields?: Fields
-    pit?: SearchPointInTimeReference
-    runtime_mappings?: MappingRuntimeFields
-    stats?: string[]
-  }
-}
-
-export type SearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>
-
-export interface SearchResponseBody<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
-  took: long
-  timed_out: boolean
-  _shards: ShardStatistics
-  hits: SearchHitsMetadata<TDocument>
-  aggregations?: TAggregations
-  _clusters?: ClusterStatistics
-  fields?: Record<string, any>
-  max_score?: double
-  num_reduce_phases?: long
-  profile?: SearchProfile
-  pit_id?: Id
-  _scroll_id?: ScrollId
-  suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>
-  terminated_early?: boolean
-}
-
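`SearchResponse` threads two generics through `SearchResponseBody`: `TDocument` types each hit's `_source`, and `TAggregations` (defaulting to `Record<AggregateName, AggregationsAggregate>`) narrows `aggregations`. A minimal sketch, again assuming an `@elastic/elasticsearch` client and an illustrative `BlogPost` shape:

```ts
import { Client, estypes } from '@elastic/elasticsearch'

interface BlogPost { title: string, views: number } // illustrative

const client = new Client({ node: 'http://localhost:9200' })

async function searchExample (): Promise<void> {
  const res = await client.search<BlogPost, { max_views: estypes.AggregationsMaxAggregate }>({
    index: 'posts',
    query: { match: { title: 'typescript' } },
    aggs: { max_views: { max: { field: 'views' } } }
  })
  // hits.hits is SearchHit<BlogPost>[], so _source is BlogPost | undefined
  for (const hit of res.hits.hits) console.log(hit._source?.title)
  // aggregations is narrowed by the second type argument
  console.log(res.aggregations?.max_views.value)
}
```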
-export interface SearchAggregationBreakdown {
-  build_aggregation: long
-  build_aggregation_count: long
-  build_leaf_collector: long
-  build_leaf_collector_count: long
-  collect: long
-  collect_count: long
-  initialize: long
-  initialize_count: long
-  post_collection?: long
-  post_collection_count?: long
-  reduce: long
-  reduce_count: long
-}
-
-export interface SearchAggregationProfile {
-  breakdown: SearchAggregationBreakdown
-  description: string
-  time_in_nanos: DurationValue<UnitNanos>
-  type: string
-  debug?: SearchAggregationProfileDebug
-  children?: SearchAggregationProfile[]
-}
-
-export interface SearchAggregationProfileDebug {
-  segments_with_multi_valued_ords?: integer
-  collection_strategy?: string
-  segments_with_single_valued_ords?: integer
-  total_buckets?: integer
-  built_buckets?: integer
-  result_strategy?: string
-  has_filter?: boolean
-  delegate?: string
-  delegate_debug?: SearchAggregationProfileDebug
-  chars_fetched?: integer
-  extract_count?: integer
-  extract_ns?: integer
-  values_fetched?: integer
-  collect_analyzed_ns?: integer
-  collect_analyzed_count?: integer
-  surviving_buckets?: integer
-  ordinals_collectors_used?: integer
-  ordinals_collectors_overhead_too_high?: integer
-  string_hashing_collectors_used?: integer
-  numeric_collectors_used?: integer
-  empty_collectors_used?: integer
-  deferred_aggregators?: string[]
-  segments_with_doc_count_field?: integer
-  segments_with_deleted_docs?: integer
-  filters?: SearchAggregationProfileDelegateDebugFilter[]
-  segments_counted?: integer
-  segments_collected?: integer
-  map_reducer?: string
-  brute_force_used?: integer
-  dynamic_pruning_attempted?: integer
-  dynamic_pruning_used?: integer
-  skipped_due_to_no_data?: integer
-}
-
-export interface SearchAggregationProfileDelegateDebugFilter {
-  results_from_metadata?: integer
-  query?: string
-  specialized_for?: string
-  segments_counted_in_constant_time?: integer
-}
-
-export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word'
-
-export interface SearchCollector {
-  name: string
-  reason: string
-  time_in_nanos: DurationValue<UnitNanos>
-  children?: SearchCollector[]
-}
-
-export interface SearchCompletionContext {
-  boost?: double
-  context: SearchContext
-  neighbours?: GeoHashPrecision[]
-  precision?: GeoHashPrecision
-  prefix?: boolean
-}
-
-export interface SearchCompletionSuggest<TDocument = unknown> extends SearchSuggestBase {
-  options: SearchCompletionSuggestOption<TDocument> | SearchCompletionSuggestOption<TDocument>[]
-}
-
-export interface SearchCompletionSuggestOption<TDocument = unknown> {
-  collate_match?: boolean
-  contexts?: Record<string, SearchContext[]>
-  fields?: Record<string, any>
-  _id?: string
-  _index?: IndexName
-  _routing?: Routing
-  _score?: double
-  _source?: TDocument
-  text: string
-  score?: double
-}
-
-export interface SearchCompletionSuggester extends SearchSuggesterBase {
-  contexts?: Record<Field, SearchCompletionContext | SearchContext | (SearchCompletionContext | SearchContext)[]>
-  fuzzy?: SearchSuggestFuzziness
-  regex?: SearchRegexOptions
-  skip_duplicates?: boolean
-}
-
-export type SearchContext = string | GeoLocation
-
-export interface SearchDfsKnnProfile {
-  vector_operations_count?: long
-  query: SearchKnnQueryProfileResult[]
-  rewrite_time: long
-  collector: SearchKnnCollectorResult[]
-}
-
-export interface SearchDfsProfile {
-  statistics?: SearchDfsStatisticsProfile
-  knn?: SearchDfsKnnProfile[]
-}
-
-export interface SearchDfsStatisticsBreakdown {
-  collection_statistics: long
-  collection_statistics_count: long
-  create_weight: long
-  create_weight_count: long
-  rewrite: long
-  rewrite_count: long
-  term_statistics: long
-  term_statistics_count: long
-}
-
-export interface SearchDfsStatisticsProfile {
-  type: string
-  description: string
-  time?: Duration
-  time_in_nanos: DurationValue<UnitNanos>
-  breakdown: SearchDfsStatisticsBreakdown
-  debug?: Record<string, any>
-  children?: SearchDfsStatisticsProfile[]
-}
-
-export interface SearchDirectGenerator {
-  field: Field
-  max_edits?: integer
-  max_inspections?: float
-  max_term_freq?: float
-  min_doc_freq?: float
-  min_word_length?: integer
-  post_filter?: string
-  pre_filter?: string
-  prefix_length?: integer
-  size?: integer
-  suggest_mode?: SuggestMode
-}
-
-export interface SearchFetchProfile {
-  type: string
-  description: string
-  time_in_nanos: DurationValue<UnitNanos>
-  breakdown: SearchFetchProfileBreakdown
-  debug?: SearchFetchProfileDebug
-  children?: SearchFetchProfile[]
-}
-
-export interface SearchFetchProfileBreakdown {
-  load_source?: integer
-  load_source_count?: integer
-  load_stored_fields?: integer
-  load_stored_fields_count?: integer
-  next_reader?: integer
-  next_reader_count?: integer
-  process_count?: integer
-  process?: integer
-}
-
-export interface SearchFetchProfileDebug {
-  stored_fields?: string[]
-  fast_path?: integer
-}
-
-export interface SearchFieldCollapse {
-  field: Field
-  inner_hits?: SearchInnerHits | SearchInnerHits[]
-  max_concurrent_group_searches?: integer
-  collapse?: SearchFieldCollapse
-}
-
-export interface SearchFieldSuggester {
-  completion?: SearchCompletionSuggester
-  phrase?: SearchPhraseSuggester
-  term?: SearchTermSuggester
-  prefix?: string
-  regex?: string
-  text?: string
-}
-
-export interface SearchHighlight extends SearchHighlightBase {
-  encoder?: SearchHighlighterEncoder
-  fields: Record<Field, SearchHighlightField>
-}
-
-export interface SearchHighlightBase {
-  type?: SearchHighlighterType
-  boundary_chars?: string
-  boundary_max_scan?: integer
-  boundary_scanner?: SearchBoundaryScanner
-  boundary_scanner_locale?: string
-  force_source?: boolean
-  fragmenter?: SearchHighlighterFragmenter
-  fragment_size?: integer
-  highlight_filter?: boolean
-  highlight_query?: QueryDslQueryContainer
-  max_fragment_length?: integer
-  max_analyzed_offset?: integer
-  no_match_size?: integer
-  number_of_fragments?: integer
-  options?: Record<string, any>
-  order?: SearchHighlighterOrder
-  phrase_limit?: integer
-  post_tags?: string[]
-  pre_tags?: string[]
-  require_field_match?: boolean
-  tags_schema?: SearchHighlighterTagsSchema
-}
-
-export interface SearchHighlightField extends SearchHighlightBase {
-  fragment_offset?: integer
-  matched_fields?: Fields
-}
-
-export type SearchHighlighterEncoder = 'default' | 'html'
-
-export type SearchHighlighterFragmenter = 'simple' | 'span'
-
-export type SearchHighlighterOrder = 'score'
-
-export type SearchHighlighterTagsSchema = 'styled'
-
-export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
-
-export interface SearchHit<TDocument = unknown> {
-  _index: IndexName
-  _id?: Id
-  _score?: double | null
-  _explanation?: ExplainExplanation
-  fields?: Record<string, any>
-  highlight?: Record<string, string[]>
-  inner_hits?: Record<string, SearchInnerHitsResult>
-  matched_queries?: string[] | Record<string, double>
-  _nested?: SearchNestedIdentity
-  _ignored?: string[]
-  ignored_field_values?: Record<string, FieldValue[]>
-  _shard?: string
-  _node?: string
-  _routing?: string
-  _source?: TDocument
-  _rank?: integer
-  _seq_no?: SequenceNumber
-  _primary_term?: long
-  _version?: VersionNumber
-  sort?: SortResults
-}
-
-export interface SearchHitsMetadata<T = unknown> {
-  total?: SearchTotalHits | long
-  hits: SearchHit<T>[]
-  max_score?: double | null
-}
-
-export interface SearchInnerHits {
-  name?: Name
-  size?: integer
-  from?: integer
-  collapse?: SearchFieldCollapse
-  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-  explain?: boolean
-  highlight?: SearchHighlight
-  ignore_unmapped?: boolean
-  script_fields?: Record<Field, ScriptField>
-  seq_no_primary_term?: boolean
-  fields?: Field[]
-  sort?: Sort
-  _source?: SearchSourceConfig
-  stored_fields?: Fields
-  track_scores?: boolean
-  version?: boolean
-}
-
-export interface SearchInnerHitsResult {
-  hits: SearchHitsMetadata
-}
-
-export interface SearchKnnCollectorResult {
-  name: string
-  reason: string
-  time?: Duration
-  time_in_nanos: DurationValue<UnitNanos>
-  children?: SearchKnnCollectorResult[]
-}
-
-export interface SearchKnnQueryProfileBreakdown {
-  advance: long
-  advance_count: long
-  build_scorer: long
-  build_scorer_count: long
-  compute_max_score: long
-  compute_max_score_count: long
-  count_weight: long
-  count_weight_count: long
-  create_weight: long
-  create_weight_count: long
-  match: long
-  match_count: long
-  next_doc: long
-  next_doc_count: long
-  score: long
-  score_count: long
-  set_min_competitive_score: long
-  set_min_competitive_score_count: long
-  shallow_advance: long
-  shallow_advance_count: long
-}
-
-export interface SearchKnnQueryProfileResult {
-  type: string
-  description: string
-  time?: Duration
-  time_in_nanos: DurationValue<UnitNanos>
-  breakdown: SearchKnnQueryProfileBreakdown
-  debug?: Record<string, any>
-  children?: SearchKnnQueryProfileResult[]
-}
-
-export interface SearchLaplaceSmoothingModel {
-  alpha: double
-}
-
-export interface SearchLearningToRank {
-  model_id: string
-  params?: Record<string, any>
-}
-
-export interface SearchLinearInterpolationSmoothingModel {
-  bigram_lambda: double
-  trigram_lambda: double
-  unigram_lambda: double
-}
-
-export interface SearchNestedIdentity {
-  field: Field
-  offset: integer
-  _nested?: SearchNestedIdentity
-}
-
-export interface SearchPhraseSuggest extends SearchSuggestBase {
-  options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[]
-}
-
-export interface SearchPhraseSuggestCollate {
-  params?: Record<string, any>
-  prune?: boolean
-  query: SearchPhraseSuggestCollateQuery
-}
-
-export interface SearchPhraseSuggestCollateQuery {
-  id?: Id
-  source?: string
-}
-
-export interface SearchPhraseSuggestHighlight {
-  post_tag: string
-  pre_tag: string
-}
-
-export interface SearchPhraseSuggestOption {
-  text: string
-  score: double
-  highlighted?: string
-  collate_match?: boolean
-}
-
-export interface SearchPhraseSuggester extends SearchSuggesterBase {
-  collate?: SearchPhraseSuggestCollate
-  confidence?: double
-  direct_generator?: SearchDirectGenerator[]
-  force_unigrams?: boolean
-  gram_size?: integer
-  highlight?: SearchPhraseSuggestHighlight
-  max_errors?: double
-  real_word_error_likelihood?: double
-  separator?: string
-  shard_size?: integer
-  smoothing?: SearchSmoothingModelContainer
-  text?: string
-  token_limit?: integer
-}
-
-export interface SearchPointInTimeReference {
-  id: Id
-  keep_alive?: Duration
-}
-
-export interface SearchProfile {
-  shards: SearchShardProfile[]
-}
-
-export interface SearchQueryBreakdown {
-  advance: long
-  advance_count: long
-  build_scorer: long
-  build_scorer_count: long
-  create_weight: long
-  create_weight_count: long
-  match: long
-  match_count: long
-  shallow_advance: long
-  shallow_advance_count: long
-  next_doc: long
-  next_doc_count: long
-  score: long
-  score_count: long
-  compute_max_score: long
-  compute_max_score_count: long
-  count_weight: long
-  count_weight_count: long
-  set_min_competitive_score: long
-  set_min_competitive_score_count: long
-}
-
-export interface SearchQueryProfile {
-  breakdown: SearchQueryBreakdown
-  description: string
-  time_in_nanos: DurationValue<UnitNanos>
-  type: string
-  children?: SearchQueryProfile[]
-}
-
-export interface SearchRegexOptions {
-  flags?: integer | string
-  max_determinized_states?: integer
-}
-
-export interface SearchRescore {
-  window_size?: integer
-  query?: SearchRescoreQuery
-  learning_to_rank?: SearchLearningToRank
-}
-
-export interface SearchRescoreQuery {
-  rescore_query: QueryDslQueryContainer
-  query_weight?: double
-  rescore_query_weight?: double
-  score_mode?: SearchScoreMode
-}
-
-export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total'
-
-export interface SearchSearchProfile {
-  collector: SearchCollector[]
-  query: SearchQueryProfile[]
-  rewrite_time: long
-}
-
-export interface SearchShardProfile {
-  aggregations: SearchAggregationProfile[]
-  cluster: string
-  dfs?: SearchDfsProfile
-  fetch?: SearchFetchProfile
-  id: string
-  index: IndexName
-  node_id: NodeId
-  searches: SearchSearchProfile[]
-  shard_id: integer
-}
-
-export interface SearchSmoothingModelContainer {
-  laplace?: SearchLaplaceSmoothingModel
-  linear_interpolation?: SearchLinearInterpolationSmoothingModel
-  stupid_backoff?: SearchStupidBackoffSmoothingModel
-}
-
-export type SearchSourceConfig = boolean | SearchSourceFilter | Fields
-
-export type SearchSourceConfigParam = boolean | Fields
-
-export interface SearchSourceFilter {
-  exclude_vectors?: boolean
-  excludes?: Fields
-  exclude?: Fields
-  includes?: Fields
-  include?: Fields
-}
-
-export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram'
-
-export interface SearchStupidBackoffSmoothingModel {
-  discount: double
-}
-
-export type SearchSuggest<TDocument = unknown> = SearchCompletionSuggest<TDocument> | SearchPhraseSuggest | SearchTermSuggest
-
-export interface SearchSuggestBase {
-  length: integer
-  offset: integer
-  text: string
-}
-
-export interface SearchSuggestFuzziness {
-  fuzziness?: Fuzziness
-  min_length?: integer
-  prefix_length?: integer
-  transpositions?: boolean
-  unicode_aware?: boolean
-}
-
-export type SearchSuggestSort = 'score' | 'frequency'
-
-export interface SearchSuggesterKeys {
-  text?: string
-}
-export type SearchSuggester = SearchSuggesterKeys
-& { [property: string]: SearchFieldSuggester | string }
-
-export interface SearchSuggesterBase {
-  field: Field
-  analyzer?: string
-  size?: integer
-}
-
-export interface SearchTermSuggest extends SearchSuggestBase {
-  options: SearchTermSuggestOption | SearchTermSuggestOption[]
-}
-
-export interface SearchTermSuggestOption {
-  text: string
-  score: double
-  freq: long
-  highlighted?: string
-  collate_match?: boolean
-}
-
-export interface SearchTermSuggester extends SearchSuggesterBase {
-  lowercase_terms?: boolean
-  max_edits?: integer
-  max_inspections?: integer
-  max_term_freq?: float
-  min_doc_freq?: float
-  min_word_length?: integer
-  prefix_length?: integer
-  shard_size?: integer
-  sort?: SearchSuggestSort
-  string_distance?: SearchStringDistance
-  suggest_mode?: SuggestMode
-  text?: string
-}
-
-export interface SearchTotalHits {
-  relation: SearchTotalHitsRelation
-  value: long
-}
-
-export type SearchTotalHitsRelation = 'eq' | 'gte'
-
-export type SearchTrackHits = boolean | integer
-
-export interface SearchMvtRequest extends RequestBase {
-  index: Indices
-  field: Field
-  zoom: SearchMvtZoomLevel
-  x: SearchMvtCoordinate
-  y: SearchMvtCoordinate
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggs?: Record<string, AggregationsAggregationContainer>
-    buffer?: integer
-    exact_bounds?: boolean
-    extent?: integer
-    fields?: Fields
-    grid_agg?: SearchMvtGridAggregationType
-    grid_precision?: integer
-    grid_type?: SearchMvtGridType
-    query?: QueryDslQueryContainer
-    runtime_mappings?: MappingRuntimeFields
-    size?: integer
-    sort?: Sort
-    track_total_hits?: SearchTrackHits
-    with_labels?: boolean
-  }
-}
-
-export type SearchMvtResponse = MapboxVectorTiles
-
-export type SearchMvtCoordinate = integer
-
-export type SearchMvtGridAggregationType = 'geotile' | 'geohex'
-
-export type SearchMvtGridType = 'grid' | 'point' | 'centroid'
-
-export type SearchMvtZoomLevel = integer
-
-export interface SearchShardsRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  local?: boolean
-  master_timeout?: Duration
-  preference?: string
-  routing?: Routing
-}
-
-export interface SearchShardsResponse {
-  nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
-  shards: NodeShard[][]
-  indices: Record<IndexName, SearchShardsShardStoreIndex>
-}
-
-export interface SearchShardsSearchShardsNodeAttributes {
-  name: NodeName
-  ephemeral_id: Id
-  transport_address: TransportAddress
-  external_id: string
-  attributes: Record<string, string>
-  roles: NodeRoles
-  version: VersionString
-  min_index_version: integer
-  max_index_version: integer
-}
-
-export interface SearchShardsShardStoreIndex {
-  aliases?: Name[]
-  filter?: QueryDslQueryContainer
-}
-
-export interface SearchTemplateRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  ccs_minimize_roundtrips?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  preference?: string
-  routing?: Routing
-  scroll?: Duration
-  search_type?: SearchType
-  rest_total_hits_as_int?: boolean
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    explain?: boolean
-    id?: Id
-    params?: Record<string, any>
-    profile?: boolean
-    source?: string
-  }
-}
-
-export interface SearchTemplateResponse<TDocument = unknown> {
-  took: long
-  timed_out: boolean
-  _shards: ShardStatistics
-  hits: SearchHitsMetadata<TDocument>
-  aggregations?: Record<AggregateName, AggregationsAggregate>
-  _clusters?: ClusterStatistics
-  fields?: Record<string, any>
-  max_score?: double
-  num_reduce_phases?: long
-  profile?: SearchProfile
-  pit_id?: Id
-  _scroll_id?: ScrollId
-  suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>
-  terminated_early?: boolean
-}
-
-export interface TermsEnumRequest extends RequestBase {
-  index: IndexName
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    field: Field
-    size?: integer
-    timeout?: Duration
-    case_insensitive?: boolean
-    index_filter?: QueryDslQueryContainer
-    string?: string
-    search_after?: string
-  }
-}
-
-export interface TermsEnumResponse {
-  _shards: ShardStatistics
-  terms: string[]
-  complete: boolean
-}
-
-export interface TermvectorsFieldStatistics {
-  doc_count: integer
-  sum_doc_freq: long
-  sum_ttf: long
-}
-
-export interface TermvectorsFilter {
-  max_doc_freq?: integer
-  max_num_terms?: integer
-  max_term_freq?: integer
-  max_word_length?: integer
-  min_doc_freq?: integer
-  min_term_freq?: integer
-  min_word_length?: integer
-}
-
-export interface TermvectorsRequest<TDocument = unknown> extends RequestBase {
-  index: IndexName
-  id?: Id
-  preference?: string
-  realtime?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    doc?: TDocument
-    filter?: TermvectorsFilter
-    per_field_analyzer?: Record<Field, string>
-    fields?: Field[]
-    field_statistics?: boolean
-    offsets?: boolean
-    payloads?: boolean
-    positions?: boolean
-    term_statistics?: boolean
-    routing?: Routing
-    version?: VersionNumber
-    version_type?: VersionType
-  }
-}
-
-export interface TermvectorsResponse {
-  found: boolean
-  _id?: Id
-  _index: IndexName
-  term_vectors?: Record<Field, TermvectorsTermVector>
-  took: long
-  _version: VersionNumber
-}
-
-export interface TermvectorsTerm {
-  doc_freq?: integer
-  score?: double
-  term_freq: integer
-  tokens?: TermvectorsToken[]
-  ttf?: integer
-}
-
-export interface TermvectorsTermVector {
-  field_statistics?: TermvectorsFieldStatistics
-  terms: Record<string, TermvectorsTerm>
-}
-
-export interface TermvectorsToken {
-  end_offset?: integer
-  payload?: string
-  position: integer
-  start_offset?: integer
-}
-
-export interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase {
-  id: Id
-  index: IndexName
-  if_primary_term?: long
-  if_seq_no?: SequenceNumber
-  include_source_on_error?: boolean
-  lang?: string
-  refresh?: Refresh
-  require_alias?: boolean
-  retry_on_conflict?: integer
-  routing?: Routing
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    detect_noop?: boolean
-    doc?: TPartialDocument
-    doc_as_upsert?: boolean
-    script?: Script | string
-    scripted_upsert?: boolean
-    _source?: SearchSourceConfig
-    upsert?: TDocument
-  }
-}
-
-export type UpdateResponse<TDocument = unknown> = UpdateUpdateWriteResponseBase<TDocument>
-
-export interface UpdateUpdateWriteResponseBase<TDocument = unknown> extends WriteResponseBase {
-  get?: InlineGet<TDocument>
-}
-
-export interface UpdateByQueryRequest extends RequestBase {
-  index: Indices
-  allow_no_indices?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  from?: long
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  pipeline?: string
-  preference?: string
-  q?: string
-  refresh?: boolean
-  request_cache?: boolean
-  requests_per_second?: float
-  routing?: Routing
-  scroll?: Duration
-  scroll_size?: long
-  search_timeout?: Duration
-  search_type?: SearchType
-  slices?: Slices
-  sort?: string[]
-  stats?: string[]
-  terminate_after?: long
-  timeout?: Duration
-  version?: boolean
-  version_type?: boolean
-  wait_for_active_shards?: WaitForActiveShards
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    max_docs?: long
-    query?: QueryDslQueryContainer
-    script?: Script | string
-    slice?: SlicedScroll
-    conflicts?: Conflicts
-  }
-}
-
-export interface UpdateByQueryResponse {
-  batches?: long
-  failures?: BulkIndexByScrollFailure[]
-  noops?: long
-  deleted?: long
-  requests_per_second?: float
-  retries?: Retries
-  task?: TaskId
-  timed_out?: boolean
-  took?: DurationValue<UnitMillis>
-  total?: long
-  updated?: long
-  version_conflicts?: long
-  throttled?: Duration
-  throttled_millis?: DurationValue<UnitMillis>
-  throttled_until?: Duration
-  throttled_until_millis?: DurationValue<UnitMillis>
-}
-
-export interface UpdateByQueryRethrottleRequest extends RequestBase {
-  task_id: Id
-  requests_per_second?: float
-}
-
-export interface UpdateByQueryRethrottleResponse {
-  nodes: Record<string, UpdateByQueryRethrottleUpdateByQueryRethrottleNode>
-}
-
-export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode {
-  tasks: Record<TaskId, TasksTaskInfo>
-}
-
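A sketch of how `UpdateByQueryRequest` maps onto a client call: the keys under `body` above are the ones that now sit at the top level of the request object (client, index, and field names assumed as in the earlier examples):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function updateByQueryExample (): Promise<void> {
  const res = await client.updateByQuery({
    index: 'posts',
    conflicts: 'proceed',          // Conflicts: 'abort' | 'proceed'
    query: { range: { views: { lt: 10 } } },
    script: { source: 'ctx._source.views = 0', lang: 'painless' },
    wait_for_completion: true
  })
  // UpdateByQueryResponse fields are all optional
  console.log(res.updated, res.version_conflicts)
}
```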
-export interface SpecUtilsBaseNode {
-  attributes: Record<string, string>
-  host: Host
-  ip: Ip
-  name: Name
-  roles?: NodeRoles
-  transport_address: TransportAddress
-}
-
-export type SpecUtilsNullValue = null
-
-export type SpecUtilsPipeSeparatedFlags<T> = T | string
-
-export type SpecUtilsStringified<T> = T | string
-
-export type SpecUtilsWithNullValue<T> = T | SpecUtilsNullValue
-
-export interface AcknowledgedResponseBase {
-  acknowledged: boolean
-}
-
-export type AggregateName = string
-
-export interface BulkIndexByScrollFailure {
-  cause: ErrorCause
-  id: Id
-  index: IndexName
-  status: integer
-}
-
-export interface BulkStats {
-  total_operations: long
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-  total_size?: ByteSize
-  total_size_in_bytes: long
-  avg_time?: Duration
-  avg_time_in_millis: DurationValue<UnitMillis>
-  avg_size?: ByteSize
-  avg_size_in_bytes: long
-}
-
-export type ByteSize = long | string
-
-export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb'
-
-export type CategoryId = string
-
-export type ClusterAlias = string
-
-export interface ClusterDetails {
-  status: ClusterSearchStatus
-  indices: string
-  took?: DurationValue<UnitMillis>
-  timed_out: boolean
-  _shards?: ShardStatistics
-  failures?: ShardFailure[]
-}
-
-export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script'
-
-export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[]
-
-export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed'
-
-export interface ClusterStatistics {
-  skipped: integer
-  successful: integer
-  total: integer
-  running: integer
-  partial: integer
-  failed: integer
-  details?: Record<ClusterAlias, ClusterDetails>
-}
-
-export interface CompletionStats {
-  size_in_bytes: long
-  size?: ByteSize
-  fields?: Record<Field, FieldSizeUsage>
-}
-
-export type Conflicts = 'abort' | 'proceed'
-
-export interface CoordsGeoBounds {
-  top: double
-  bottom: double
-  left: double
-  right: double
-}
-
-export type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared'
-
-export type DFRAfterEffect = 'no' | 'b' | 'l'
-
-export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p'
-
-export type DataStreamName = string
-
-export type DataStreamNames = DataStreamName | DataStreamName[]
-
-export type DateFormat = string
-
-export type DateMath = string | Date
-
-export type DateTime = string | EpochTime<UnitMillis> | Date
-
-export type Distance = string
-
-export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm'
-
-export interface DocStats {
-  count: long
-  deleted?: long
-  total_size_in_bytes: long
-  total_size?: ByteSize
-}
-
-export type Duration = string | -1 | 0
-
-export type DurationLarge = string
-
-export type DurationValue<Unit> = Unit
-
-export interface ElasticsearchVersionInfo {
-  build_date: DateTime
-  build_flavor: string
-  build_hash: string
-  build_snapshot: boolean
-  build_type: string
-  lucene_version: VersionString
-  minimum_index_compatibility_version: VersionString
-  minimum_wire_compatibility_version: VersionString
-  number: string
-}
-
-export interface ElasticsearchVersionMinInfo {
-  build_flavor: string
-  minimum_index_compatibility_version: VersionString
-  minimum_wire_compatibility_version: VersionString
-  number: string
-}
-
-export interface EmptyObject {
-}
-
-export type EpochTime<Unit> = Unit
-
-export interface ErrorCauseKeys {
-  type: string
-  reason?: string | null
-  stack_trace?: string
-  caused_by?: ErrorCause
-  root_cause?: ErrorCause[]
-  suppressed?: ErrorCause[]
-}
-export type ErrorCause = ErrorCauseKeys
-& { [property: string]: any }
-
-export interface ErrorResponseBase {
-  error: ErrorCause
-  status: integer
-}
-
-export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none'
-
-export type ExpandWildcards = ExpandWildcard | ExpandWildcard[]
-
-export type Field = string
-
-export interface FieldMemoryUsage {
-  memory_size?: ByteSize
-  memory_size_in_bytes: long
-}
-
-export interface FieldSizeUsage {
-  size?: ByteSize
-  size_in_bytes: long
-}
-
-export interface FieldSort {
-  missing?: AggregationsMissing
-  mode?: SortMode
-  nested?: NestedSortValue
-  order?: SortOrder
-  unmapped_type?: MappingFieldType
-  numeric_type?: FieldSortNumericType
-  format?: string
-}
-
-export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos'
-
-export type FieldValue = long | double | string | boolean | null | any
-
-export interface FielddataStats {
-  evictions?: long
-  memory_size?: ByteSize
-  memory_size_in_bytes: long
-  fields?: Record<Field, FieldMemoryUsage>
-  global_ordinals: GlobalOrdinalsStats
-}
-
-export type Fields = Field | Field[]
-
-export interface FlushStats {
-  periodic: long
-  total: long
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-}
-
-export type Fuzziness = string | integer
-
-export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds
-
-export interface GeoDistanceSortKeys {
-  mode?: SortMode
-  distance_type?: GeoDistanceType
-  ignore_unmapped?: boolean
-  order?: SortOrder
-  unit?: DistanceUnit
-  nested?: NestedSortValue
-}
-export type GeoDistanceSort = GeoDistanceSortKeys
-& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue }
-
-export type GeoDistanceType = 'arc' | 'plane'
-
-export type GeoHash = string
-
-export interface GeoHashLocation {
-  geohash: GeoHash
-}
-
-export type GeoHashPrecision = number | string
-
-export type GeoHexCell = string
-
-export interface GeoLine {
-  type: string
-  coordinates: double[][]
-}
-
-export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string
-
-export type GeoShape = any
-
-export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains'
-
-export type GeoTile = string
-
-export type GeoTilePrecision = number
-
-export interface GetStats {
-  current: long
-  exists_time?: Duration
-  exists_time_in_millis: DurationValue<UnitMillis>
-  exists_total: long
-  missing_time?: Duration
-  missing_time_in_millis: DurationValue<UnitMillis>
-  missing_total: long
-  time?: Duration
-  time_in_millis: DurationValue<UnitMillis>
-  total: long
-}
-
-export interface GlobalOrdinalFieldStats {
-  build_time_in_millis: UnitMillis
-  build_time?: string
-  shard_max_value_count: long
-}
-
-export interface GlobalOrdinalsStats {
-  build_time_in_millis: UnitMillis
-  build_time?: string
-  fields?: Record<Field, GlobalOrdinalFieldStats>
-}
-
-export type GrokPattern = string
-
-export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' | 'unknown' | 'unavailable'
-
-export type Host = string
-
-export type HttpHeaders = Record<string, string | string[]>
-
-export type IBDistribution = 'll' | 'spl'
-
-export type IBLambda = 'df' | 'ttf'
-
-export type Id = string
-
-export type Ids = Id | Id[]
-
-export type IndexAlias = string
-
-export type IndexName = string
-
-export type IndexPattern = string
-
-export type IndexPatterns = IndexPattern[]
-
-export interface IndexingStats {
-  index_current: long
-  delete_current: long
-  delete_time?: Duration
-  delete_time_in_millis: DurationValue<UnitMillis>
-  delete_total: long
-  is_throttled: boolean
-  noop_update_total: long
-  throttle_time?: Duration
-  throttle_time_in_millis: DurationValue<UnitMillis>
-  index_time?: Duration
-  index_time_in_millis: DurationValue<UnitMillis>
-  index_total: long
-  index_failed: long
-  types?: Record<string, IndexingStats>
-  write_load?: double
-}
-
-export type Indices = IndexName | IndexName[]
-
-export interface IndicesOptions {
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  ignore_throttled?: boolean
-}
-
-export interface IndicesResponseBase extends AcknowledgedResponseBase {
-  _shards?: ShardStatistics
-}
-
-export interface InlineGetKeys<TDocument = unknown> {
-  fields?: Record<string, any>
-  found: boolean
-  _seq_no?: SequenceNumber
-  _primary_term?: long
-  _routing?: Routing
-  _source?: TDocument
-}
-export type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument>
-& { [property: string]: any }
-
-export interface InnerRetriever {
-  retriever: RetrieverContainer
-  weight: float
-  normalizer: ScoreNormalizer
-}
-
-export type Ip = string
-
-export interface KnnQuery extends QueryDslQueryBase {
-  field: Field
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  num_candidates?: integer
-  k?: integer
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  similarity?: float
-  rescore_vector?: RescoreVector
-}
-
-export interface KnnRetriever extends RetrieverBase {
-  field: string
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  k: integer
-  num_candidates: integer
-  similarity?: float
-  rescore_vector?: RescoreVector
-}
-
-export interface KnnSearch {
-  field: Field
-  query_vector?: QueryVector
-  query_vector_builder?: QueryVectorBuilder
-  k?: integer
-  num_candidates?: integer
-  boost?: float
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  similarity?: float
-  inner_hits?: SearchInnerHits
-  rescore_vector?: RescoreVector
-}
-
-export interface LatLonGeoLocation {
-  lat: double
-  lon: double
-}
-
-export type Level = 'cluster' | 'indices' | 'shards'
-
-export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED'
-
-export interface LinearRetriever extends RetrieverBase {
-  retrievers?: InnerRetriever[]
-  rank_window_size?: integer
-  query?: string
-  fields?: string[]
-  normalizer?: ScoreNormalizer
-}
-
-export type MapboxVectorTiles = ArrayBuffer
-
-export interface MergesStats {
-  current: long
-  current_docs: long
-  current_size?: string
-  current_size_in_bytes: long
-  total: long
-  total_auto_throttle?: string
-  total_auto_throttle_in_bytes: long
-  total_docs: long
-  total_size?: string
-  total_size_in_bytes: long
-  total_stopped_time?: Duration
-  total_stopped_time_in_millis: DurationValue<UnitMillis>
-  total_throttled_time?: Duration
-  total_throttled_time_in_millis: DurationValue<UnitMillis>
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-}
-
-export type Metadata = Record<string, any>
-
-export type Metrics = string | string[]
-
-export type MinimumShouldMatch = integer | string
-
-export type MultiTermQueryRewrite = string
-
-export type Name = string
-
-export type Names = Name | Name[]
-
-export type Namespace = string
-
-export interface NestedSortValue {
-  filter?: QueryDslQueryContainer
-  max_children?: integer
-  nested?: NestedSortValue
-  path: Field
-}
-
-export interface NodeAttributes {
-  attributes: Record<string, string>
-  ephemeral_id: Id
-  id?: NodeId
-  name: NodeName
-  transport_address: TransportAddress
-}
-
-export type NodeId = string
-
-export type NodeIds = NodeId | NodeId[]
-
-export type NodeName = string
-
-export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only'
-
-export type NodeRoles = NodeRole[]
-
-export interface NodeShard {
-  state: IndicesStatsShardRoutingState
-  primary: boolean
-  node?: NodeName
-  shard: integer
-  index: IndexName
-  allocation_id?: Record<string, Id>
-  recovery_source?: Record<string, Id>
-  unassigned_info?: ClusterAllocationExplainUnassignedInformation
-  relocating_node?: NodeId | null
-  relocation_failure_info?: RelocationFailureInfo
-}
-
-export interface NodeStatistics {
-  failures?: ErrorCause[]
-  total: integer
-  successful: integer
-  failed: integer
-}
-
-export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z'
-
-export type OpType = 'index' | 'create'
-
-export type Password = string
-
-export type Percentage = string | float
-
-export interface PinnedRetriever extends RetrieverBase {
-  retriever: RetrieverContainer
-  ids?: string[]
-  docs?: SpecifiedDocument[]
-  rank_window_size?: integer
-}
-
-export type PipelineName = string
-
-export interface PluginStats {
-  classname: string
-  description: string
-  elasticsearch_version: VersionString
-  extended_plugins: string[]
-  has_native_controller: boolean
-  java_version: VersionString
-  name: Name
-  version: VersionString
-  licensed: boolean
-}
-
-export type PropertyName = string
-
-export interface QueryCacheStats {
-  cache_count: long
-  cache_size: long
-  evictions: long
-  hit_count: long
-  memory_size?: ByteSize
-  memory_size_in_bytes: long
-  miss_count: long
-  total_count: long
-}
-
-export type QueryVector = float[]
-
-export interface QueryVectorBuilder {
-  text_embedding?: TextEmbedding
-}
-
-export interface RRFRetriever extends RetrieverBase {
-  retrievers: RetrieverContainer[]
-  rank_constant?: integer
-  rank_window_size?: integer
-  query?: string
-  fields?: string[]
-}
-
-export interface RankBase {
-}
-
-export interface RankContainer {
-  rrf?: RrfRank
-}
-
-export interface RecoveryStats {
-  current_as_source: long
-  current_as_target: long
-  throttle_time?: Duration
-  throttle_time_in_millis: DurationValue<UnitMillis>
-}
-
-export type Refresh = boolean | 'true' | 'false' | 'wait_for'
-
-export interface RefreshStats {
-  external_total: long
-  external_total_time_in_millis: DurationValue<UnitMillis>
-  listeners: long
-  total: long
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-}
-
-export type RelationName = string
-
-export interface RelocationFailureInfo {
-  failed_attempts: integer
-}
-
-export interface RequestBase extends SpecUtilsCommonQueryParameters {
-}
-
-export interface RequestCacheStats {
-  evictions: long
-  hit_count: long
-  memory_size?: string
-  memory_size_in_bytes: long
-  miss_count: long
-}
-
-export interface RescoreVector {
-  oversample: float
-}
-
-export interface RescorerRetriever extends RetrieverBase {
-  retriever: RetrieverContainer
-  rescore: SearchRescore | SearchRescore[]
-}
-
-export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop'
-
-export interface Retries {
-  bulk: long
-  search: long
-}
-
-export interface RetrieverBase {
-  filter?: QueryDslQueryContainer | QueryDslQueryContainer[]
-  min_score?: float
-  _name?: string
-}
-
-export interface RetrieverContainer {
-  standard?: StandardRetriever
-  knn?: KnnRetriever
-  rrf?: RRFRetriever
-  text_similarity_reranker?: TextSimilarityReranker
-  rule?: RuleRetriever
-  rescorer?: RescorerRetriever
-  linear?: LinearRetriever
-  pinned?: PinnedRetriever
-}
-
-export type Routing = string
-
-export interface RrfRank {
-  rank_constant?: long
-  rank_window_size?: long
-}
-
-export interface RuleRetriever extends RetrieverBase {
-  ruleset_ids: Id | Id[]
-  match_criteria: any
-  retriever: RetrieverContainer
-  rank_window_size?: integer
-}
-
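`RetrieverContainer` is a single-variant union in object form: exactly one of its keys is expected per container, and containers nest. A sketch combining `standard` and `knn` under `rrf` (type import and vector values are illustrative):

```ts
import { estypes } from '@elastic/elasticsearch'

const retriever: estypes.RetrieverContainer = {
  rrf: {
    rank_constant: 60,
    rank_window_size: 50,
    retrievers: [
      // Each nested element is itself a RetrieverContainer.
      { standard: { query: { match: { title: 'vector search' } } } },
      { knn: { field: 'embedding', query_vector: [0.12, 0.31, 0.87], k: 10, num_candidates: 50 } }
    ]
  }
}
```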
-export type ScalarValue = long | double | string | boolean | null
-
-export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm'
-
-export interface ScoreSort {
-  order?: SortOrder
-}
-
-export interface Script {
-  source?: string
-  id?: Id
-  params?: Record<string, any>
-  lang?: ScriptLanguage
-  options?: Record<string, string>
-}
-
-export interface ScriptField {
-  script: Script | string
-  ignore_failure?: boolean
-}
-
-export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string
-
-export interface ScriptSort {
-  order?: SortOrder
-  script: Script | string
-  type?: ScriptSortType
-  mode?: SortMode
-  nested?: NestedSortValue
-}
-
-export type ScriptSortType = 'string' | 'number' | 'version'
-
-export interface ScriptTransform {
-  lang?: string
-  params?: Record<string, any>
-  source?: string
-  id?: string
-}
-
-export type ScrollId = string
-
-export type ScrollIds = ScrollId | ScrollId[]
-
-export interface SearchStats {
-  fetch_current: long
-  fetch_time?: Duration
-  fetch_time_in_millis: DurationValue<UnitMillis>
-  fetch_total: long
-  open_contexts?: long
-  query_current: long
-  query_time?: Duration
-  query_time_in_millis: DurationValue<UnitMillis>
-  query_total: long
-  scroll_current: long
-  scroll_time?: Duration
-  scroll_time_in_millis: DurationValue<UnitMillis>
-  scroll_total: long
-  suggest_current: long
-  suggest_time?: Duration
-  suggest_time_in_millis: DurationValue<UnitMillis>
-  suggest_total: long
-  groups?: Record<string, SearchStats>
-}
-
-export interface SearchTransform {
-  request: WatcherSearchInputRequestDefinition
-  timeout: Duration
-}
-
-export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch'
-
-export interface SegmentsStats {
-  count: integer
-  doc_values_memory?: ByteSize
-  doc_values_memory_in_bytes: long
-  file_sizes: Record<string, IndicesStatsShardFileSizeInfo>
-  fixed_bit_set?: ByteSize
-  fixed_bit_set_memory_in_bytes: long
-  index_writer_memory?: ByteSize
-  index_writer_memory_in_bytes: long
-  max_unsafe_auto_id_timestamp: long
-  memory?: ByteSize
-  memory_in_bytes: long
-  norms_memory?: ByteSize
-  norms_memory_in_bytes: long
-  points_memory?: ByteSize
-  points_memory_in_bytes: long
-  stored_fields_memory_in_bytes: long
-  stored_fields_memory?: ByteSize
-  terms_memory_in_bytes: long
-  terms_memory?: ByteSize
-  term_vectors_memory?: ByteSize
-  term_vectors_memory_in_bytes: long
-  version_map_memory?: ByteSize
-  version_map_memory_in_bytes: long
-}
-
-export type SequenceNumber = long
-
-export type Service = string
-
-export interface ShardFailure {
-  index?: IndexName
-  node?: string
-  reason: ErrorCause
-  shard: integer
-  status?: string
-}
-
-export interface ShardStatistics {
-  failed: uint
-  successful: uint
-  total: uint
-  failures?: ShardFailure[]
-  skipped?: uint
-}
-
-export interface ShardsOperationResponseBase {
-  _shards?: ShardStatistics
-}
-
-export interface SlicedScroll {
-  field?: Field
-  id: Id
-  max: integer
-}
-
-export type Slices = integer | SlicesCalculation
-
-export type SlicesCalculation = 'auto'
-
-export type Sort = SortCombinations | SortCombinations[]
-
-export type SortCombinations = Field | SortOptions
-
-export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median'
-
-export interface SortOptionsKeys {
-  _score?: ScoreSort
-  _doc?: ScoreSort
-  _geo_distance?: GeoDistanceSort
-  _script?: ScriptSort
-}
-export type SortOptions = SortOptionsKeys
-& { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort }
-
-export type SortOrder = 'asc' | 'desc'
-
-export type SortResults = FieldValue[]
-
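`Sort` accepts a single `SortCombinations` or an array, and each combination is either a bare field name or a `SortOptions` object, so all of the following can coexist in one list. A sketch (field names are illustrative):

```ts
import { estypes } from '@elastic/elasticsearch'

const sort: estypes.Sort = [
  '_score',                         // SortCombinations as a plain Field
  { views: { order: 'desc' } },     // FieldSort under an arbitrary field key
  { _geo_distance: {                // GeoDistanceSort via SortOptionsKeys
    location: { lat: 40.7, lon: -74.0 },
    order: 'asc',
    unit: 'km'
  } }
]
```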
-
-export interface WarmerStats {
-  current: long
-  total: long
-  total_time?: Duration
-  total_time_in_millis: DurationValue<UnitMillis>
-}
-
-export interface WktGeoBounds {
-  wkt: string
-}
-
-export interface WriteResponseBase {
-  _id: Id
-  _index: IndexName
-  _primary_term?: long
-  result: Result
-  _seq_no?: SequenceNumber
-  _shards: ShardStatistics
-  _version: VersionNumber
-  forced_refresh?: boolean
-}
-
-export type byte = number
-
-export type double = number
-
-export type float = number
-
-export type integer = number
-
-export type long = number
-
-export type short = number
-
-export type uint = number
-
-export type ulong = number
-
-export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase<AggregationsAdjacencyMatrixBucket> {
-}
-
-export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase {
-  filters?: Record<string, QueryDslQueryContainer>
-  separator?: string
-}
-
-export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase {
-  key: string
-}
-export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
-
-export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate
-
-export interface AggregationsAggregateBase {
-  meta?: Metadata
-}
-
-export type AggregationsAggregateOrder = Partial<Record<Field, SortOrder>> | Partial<Record<Field, SortOrder>>[]
-
-export interface AggregationsAggregation {
-}
-
-export interface AggregationsAggregationContainer {
-  aggregations?: Record<string, AggregationsAggregationContainer>
-  aggs?: Record<string, AggregationsAggregationContainer>
-  meta?: Metadata
-  adjacency_matrix?: AggregationsAdjacencyMatrixAggregation
-  auto_date_histogram?: AggregationsAutoDateHistogramAggregation
-  avg?: AggregationsAverageAggregation
-  avg_bucket?: AggregationsAverageBucketAggregation
-  boxplot?: AggregationsBoxplotAggregation
-  bucket_script?: AggregationsBucketScriptAggregation
-  bucket_selector?: AggregationsBucketSelectorAggregation
-  bucket_sort?: AggregationsBucketSortAggregation
-  bucket_count_ks_test?: AggregationsBucketKsAggregation
-  bucket_correlation?: AggregationsBucketCorrelationAggregation
-  cardinality?: AggregationsCardinalityAggregation
-  categorize_text?: AggregationsCategorizeTextAggregation
-  children?: AggregationsChildrenAggregation
-  composite?: AggregationsCompositeAggregation
-  cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation
-  cumulative_sum?: AggregationsCumulativeSumAggregation
-  date_histogram?: AggregationsDateHistogramAggregation
-  date_range?: AggregationsDateRangeAggregation
-  derivative?: AggregationsDerivativeAggregation
-  diversified_sampler?: AggregationsDiversifiedSamplerAggregation
-  extended_stats?: AggregationsExtendedStatsAggregation
-  extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation
-  frequent_item_sets?: AggregationsFrequentItemSetsAggregation
-  filter?: QueryDslQueryContainer
-  filters?: AggregationsFiltersAggregation
-  geo_bounds?: AggregationsGeoBoundsAggregation
-  geo_centroid?: AggregationsGeoCentroidAggregation
-  geo_distance?: AggregationsGeoDistanceAggregation
-  geohash_grid?: AggregationsGeoHashGridAggregation
-  geo_line?: AggregationsGeoLineAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
-  geohex_grid?: AggregationsGeohexGridAggregation
-  global?: AggregationsGlobalAggregation
-  histogram?: AggregationsHistogramAggregation
-  ip_range?: AggregationsIpRangeAggregation
-  ip_prefix?: AggregationsIpPrefixAggregation
-  inference?: AggregationsInferenceAggregation
-  line?: AggregationsGeoLineAggregation
-  matrix_stats?: AggregationsMatrixStatsAggregation
-  max?: AggregationsMaxAggregation
-  max_bucket?: AggregationsMaxBucketAggregation
-  median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation
-  min?: AggregationsMinAggregation
-  min_bucket?: AggregationsMinBucketAggregation
-  missing?: AggregationsMissingAggregation
-  moving_avg?: AggregationsMovingAverageAggregation
-  moving_percentiles?: AggregationsMovingPercentilesAggregation
-  moving_fn?: AggregationsMovingFunctionAggregation
-  multi_terms?: AggregationsMultiTermsAggregation
-  nested?: AggregationsNestedAggregation
-  normalize?: AggregationsNormalizeAggregation
-  parent?: AggregationsParentAggregation
-  percentile_ranks?: AggregationsPercentileRanksAggregation
-  percentiles?: AggregationsPercentilesAggregation
-  percentiles_bucket?: AggregationsPercentilesBucketAggregation
-  range?: AggregationsRangeAggregation
-  rare_terms?: AggregationsRareTermsAggregation
-  rate?: AggregationsRateAggregation
-  reverse_nested?: AggregationsReverseNestedAggregation
-  random_sampler?: AggregationsRandomSamplerAggregation
-  sampler?: AggregationsSamplerAggregation
-  scripted_metric?: AggregationsScriptedMetricAggregation
-  serial_diff?: AggregationsSerialDifferencingAggregation
-  significant_terms?: AggregationsSignificantTermsAggregation
-  significant_text?: AggregationsSignificantTextAggregation
-  stats?: AggregationsStatsAggregation
-  stats_bucket?: AggregationsStatsBucketAggregation
-  string_stats?: AggregationsStringStatsAggregation
-  sum?: AggregationsSumAggregation
-  sum_bucket?: AggregationsSumBucketAggregation
-  terms?: AggregationsTermsAggregation
-  time_series?: AggregationsTimeSeriesAggregation
-  top_hits?: AggregationsTopHitsAggregation
-  t_test?: AggregationsTTestAggregation
-  top_metrics?: AggregationsTopMetricsAggregation
-  value_count?: AggregationsValueCountAggregation
-  weighted_avg?: AggregationsWeightedAverageAggregation
-  variable_width_histogram?: AggregationsVariableWidthHistogramAggregation
-}
-
-export interface AggregationsAggregationRange {
-  from?: double | null
-  key?: string
-  to?: double | null
-}
-
-export interface AggregationsArrayPercentilesItem {
-  key: double
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsDateHistogramBucket> {
-  interval: DurationLarge
-}
-
-export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase {
-  buckets?: integer
-  field?: Field
-  format?: string
-  minimum_interval?: AggregationsMinimumInterval
-  missing?: DateTime
-  offset?: string
-  params?: Record<string, any>
-  script?: Script | string
-  time_zone?: TimeZone
-}
-
-export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase {
-  min: double
-  max: double
-  q1: double
-  q2: double
-  q3: double
-  lower: double
-  upper: double
-  min_as_string?: string
-  max_as_string?: string
-  q1_as_string?: string
-  q2_as_string?: string
-  q3_as_string?: string
-  lower_as_string?: string
-  upper_as_string?: string
-}
-
-export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase {
-  compression?: double
-  execution_hint?: AggregationsTDigestExecutionHint
-}
-
-export interface AggregationsBucketAggregationBase {
-}
-
-export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation {
-  function: AggregationsBucketCorrelationFunction
-}
-
-export interface AggregationsBucketCorrelationFunction {
-  count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation
-}
-
-export interface AggregationsBucketCorrelationFunctionCountCorrelation {
-  indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator
-}
-
-export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator {
-  doc_count: integer
-  expectations: double[]
-  fractions?: double[]
-}
-
-export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation {
-  alternative?: string[]
-  fractions?: double[]
-  sampling_method?: string
-}
-
-export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase {
-  keys: string[]
-}
-
-export interface AggregationsBucketPathAggregation {
-  buckets_path?: AggregationsBucketsPath
-}
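`AggregationsAggregationContainer` is the shape every request-side aggregation takes: exactly one aggregation variant per key, plus optional nested `aggs`. A minimal sketch of how callers composed it (field names are hypothetical; the import path is the client's published one):

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

const aggs: Record<string, AggregationsAggregationContainer> = {
  by_category: {
    terms: { field: 'category.keyword', size: 10 },
    aggs: {
      avg_price: { avg: { field: 'price' } }, // metric sub-aggregation per bucket
      price_stats: { stats: { field: 'price' } }
    }
  },
  monthly: {
    date_histogram: { field: 'timestamp', calendar_interval: 'month' }
  }
}
```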
-
-export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase {
-  script?: Script | string
-}
-
-export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase {
-  script?: Script | string
-}
-
-export interface AggregationsBucketSortAggregation {
-  from?: integer
-  gap_policy?: AggregationsGapPolicy
-  size?: integer
-  sort?: Sort
-}
-
-export type AggregationsBuckets<TBucket = unknown> = Record<string, TBucket> | TBucket[]
-
-export type AggregationsBucketsPath = string | string[] | Record<string, string>
-
-export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y'
-
-export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase {
-  value: long
-}
-
-export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase {
-  precision_threshold?: integer
-  rehash?: boolean
-  execution_hint?: AggregationsCardinalityExecutionMode
-}
-
-export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic'
-
-export interface AggregationsCategorizeTextAggregation {
-  field: Field
-  max_unique_tokens?: integer
-  max_matched_tokens?: integer
-  similarity_threshold?: integer
-  categorization_filters?: string[]
-  categorization_analyzer?: AggregationsCategorizeTextAnalyzer
-  shard_size?: integer
-  size?: integer
-  min_doc_count?: integer
-  shard_min_doc_count?: integer
-}
-
-export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer
-
-export interface AggregationsChiSquareHeuristic {
-  background_is_superset: boolean
-  include_negatives: boolean
-}
-
-export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase {
-  type?: RelationName
-}
-
-export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsCompositeBucket> {
-  after_key?: AggregationsCompositeAggregateKey
-}
-
-export type AggregationsCompositeAggregateKey = Record<Field, FieldValue>
-
-export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase {
-  after?: AggregationsCompositeAggregateKey
-  size?: integer
-  sources?: Record<string, AggregationsCompositeAggregationSource>[]
-}
-
-export interface AggregationsCompositeAggregationBase {
-  field?: Field
-  missing_bucket?: boolean
-  missing_order?: AggregationsMissingOrder
-  script?: Script | string
-  value_type?: AggregationsValueType
-  order?: SortOrder
-}
-
-export interface AggregationsCompositeAggregationSource {
-  terms?: AggregationsCompositeTermsAggregation
-  histogram?: AggregationsCompositeHistogramAggregation
-  date_histogram?: AggregationsCompositeDateHistogramAggregation
-  geotile_grid?: AggregationsCompositeGeoTileGridAggregation
-}
-
-export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase {
-  key: AggregationsCompositeAggregateKey
-}
-export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys
-& { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long }
-
-export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase {
-  format?: string
-  calendar_interval?: DurationLarge
-  fixed_interval?: DurationLarge
-  offset?: Duration
-  time_zone?: TimeZone
-}
-
-export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase {
-  precision?: integer
-  bounds?: GeoBounds
-}
-
-export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase {
-  interval: double
-}
-
-export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase {
-}
-
-export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase {
-  value: long
-  value_as_string?: string
-}
-
-export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsCustomCategorizeTextAnalyzer {
-  char_filter?: string[]
-  tokenizer?: string
-  filter?: string[]
-}
-
-export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsDateHistogramBucket> {
-}
-
-export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase {
-  calendar_interval?: AggregationsCalendarInterval
-  extended_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath>
-  hard_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath>
-  field?: Field
-  fixed_interval?: Duration
-  format?: string
-  interval?: Duration
-  min_doc_count?: integer
-  missing?: DateTime
-  offset?: Duration
-  order?: AggregationsAggregateOrder
-  params?: Record<string, any>
-  script?: Script | string
-  time_zone?: TimeZone
-  keyed?: boolean
-}
-
-export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase {
-  key_as_string?: string
-  key: EpochTime<UnitMillis>
-}
-export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | string | EpochTime<UnitMillis> | long }
-
-export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate {
-}
-
-export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  format?: string
-  missing?: AggregationsMissing
-  ranges?: AggregationsDateRangeExpression[]
-  time_zone?: TimeZone
-  keyed?: boolean
-}
-
-export interface AggregationsDateRangeExpression {
-  from?: AggregationsFieldDateMath
-  key?: string
-  to?: AggregationsFieldDateMath
-}
-
-export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase {
-  normalized_value?: double
-  normalized_value_as_string?: string
-}
-
-export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase {
-  execution_hint?: AggregationsSamplerAggregationExecutionHint
-  max_docs_per_value?: integer
-  script?: Script | string
-  shard_size?: integer
-  field?: Field
-}
-
-export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase<AggregationsDoubleTermsBucket> {
-}
-
-export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: double
-  key_as_string?: string
-}
-export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsEwmaModelSettings {
-  alpha?: float
-}
-
-export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'ewma'
-  settings: AggregationsEwmaModelSettings
-}
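The composite aggregation types above encode the paging contract: `after_key` on the response feeds the `after` parameter of the next request. A minimal sketch of that loop (index, source, and field names are hypothetical; the cast is needed because `aggregations` is typed as a map of the `AggregationsAggregate` union):

```ts
import { Client } from '@elastic/elasticsearch'
import type {
  AggregationsCompositeAggregate,
  AggregationsCompositeAggregateKey,
  AggregationsCompositeBucket
} from '@elastic/elasticsearch/lib/api/types'

const client = new Client({ node: 'http://localhost:9200' })

async function pageAll (): Promise<void> {
  let after: AggregationsCompositeAggregateKey | undefined
  do {
    const res = await client.search({
      index: 'my-index',
      size: 0,
      aggs: {
        pages: {
          composite: {
            size: 100,
            sources: [{ category: { terms: { field: 'category.keyword' } } }],
            ...(after != null && { after }) // resume from the previous page's cursor
          }
        }
      }
    })
    const pages = res.aggregations?.pages as AggregationsCompositeAggregate
    for (const bucket of pages.buckets as AggregationsCompositeBucket[]) {
      console.log(bucket.key, bucket.doc_count)
    }
    after = pages.after_key // undefined once all buckets are consumed
  } while (after != null)
}
```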
-
-export interface AggregationsExtendedBounds<T> {
-  max?: T
-  min?: T
-}
-
-export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate {
-  sum_of_squares: double | null
-  variance: double | null
-  variance_population: double | null
-  variance_sampling: double | null
-  std_deviation: double | null
-  std_deviation_population: double | null
-  std_deviation_sampling: double | null
-  std_deviation_bounds?: AggregationsStandardDeviationBounds
-  sum_of_squares_as_string?: string
-  variance_as_string?: string
-  variance_population_as_string?: string
-  variance_sampling_as_string?: string
-  std_deviation_as_string?: string
-  std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString
-}
-
-export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase {
-  sigma?: double
-}
-
-export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate {
-}
-
-export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase {
-  sigma?: double
-}
-
-export type AggregationsFieldDateMath = DateMath | double
-
-export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase<AggregationsFiltersBucket> {
-}
-
-export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase {
-  filters?: AggregationsBuckets<QueryDslQueryContainer>
-  other_bucket?: boolean
-  other_bucket_key?: string
-  keyed?: boolean
-}
-
-export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase {
-  key?: string
-}
-export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
-
-export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase {
-  format?: string
-}
-
-export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase {
-  format?: string
-}
-
-export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase<AggregationsFrequentItemSetsBucket> {
-}
-
-export interface AggregationsFrequentItemSetsAggregation {
-  fields: AggregationsFrequentItemSetsField[]
-  minimum_set_size?: integer
-  minimum_support?: double
-  size?: integer
-  filter?: QueryDslQueryContainer
-}
-
-export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase {
-  key: Record<Field, string[]>
-  support: double
-}
-export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys
-& { [property: string]: AggregationsAggregate | Record<Field, string[]> | double | long }
-
-export interface AggregationsFrequentItemSetsField {
-  field: Field
-  exclude?: AggregationsTermsExclude
-  include?: AggregationsTermsInclude
-}
-
-export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values'
-
-export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase {
-  bounds?: GeoBounds
-}
-
-export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase {
-  wrap_longitude?: boolean
-}
-
-export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase {
-  count: long
-  location?: GeoLocation
-}
-
-export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase {
-  count?: long
-  location?: GeoLocation
-}
-
-export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate {
-}
-
-export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase {
-  distance_type?: GeoDistanceType
-  field?: Field
-  origin?: GeoLocation
-  ranges?: AggregationsAggregationRange[]
-  unit?: DistanceUnit
-}
-
-export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoHashGridBucket> {
-}
-
-export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase {
-  bounds?: GeoBounds
-  field?: Field
-  precision?: GeoHashPrecision
-  shard_size?: integer
-  size?: integer
-}
-
-export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoHash
-}
-export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoHash | long }
-
-export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoHexGridBucket> {
-}
-
-export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoHexCell
-}
-export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoHexCell | long }
-
-export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase {
-  type: string
-  geometry: GeoLine
-  properties: any
-}
-
-export interface AggregationsGeoLineAggregation {
-  point: AggregationsGeoLinePoint
-  sort: AggregationsGeoLineSort
-  include_sort?: boolean
-  sort_order?: SortOrder
-  size?: integer
-}
-
-export interface AggregationsGeoLinePoint {
-  field: Field
-}
-
-export interface AggregationsGeoLineSort {
-  field: Field
-}
-
-export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoTileGridBucket> {
-}
-
-export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  precision?: GeoTilePrecision
-  shard_size?: integer
-  size?: integer
-  bounds?: GeoBounds
-}
-
-export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase {
-  key: GeoTile
-}
-export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys
-& { [property: string]: AggregationsAggregate | GeoTile | long }
-
-export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase {
-  field: Field
-  precision?: integer
-  bounds?: GeoBounds
-  size?: integer
-  shard_size?: integer
-}
-
-export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase {
-}
-
-export interface AggregationsGoogleNormalizedDistanceHeuristic {
-  background_is_superset?: boolean
-}
-
-export interface AggregationsHdrMethod {
-  number_of_significant_value_digits?: integer
-}
-
-export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsHistogramBucket> {
-}
-
-export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase {
-  extended_bounds?: AggregationsExtendedBounds<double>
-  hard_bounds?: AggregationsExtendedBounds<double>
-  field?: Field
-  interval?: double
-  min_doc_count?: integer
-  missing?: double
-  offset?: double
-  order?: AggregationsAggregateOrder
-  script?: Script | string
-  format?: string
-  keyed?: boolean
-}
-
-export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase {
-  key_as_string?: string
-  key: double
-}
-export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | string | double | long }
-
-export interface AggregationsHoltLinearModelSettings {
-  alpha?: float
-  beta?: float
-}
-
-export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'holt'
-  settings: AggregationsHoltLinearModelSettings
-}
-
-export interface AggregationsHoltWintersModelSettings {
-  alpha?: float
-  beta?: float
-  gamma?: float
-  pad?: boolean
-  period?: integer
-  type?: AggregationsHoltWintersType
-}
-
-export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'holt_winters'
-  settings: AggregationsHoltWintersModelSettings
-}
-
-export type AggregationsHoltWintersType = 'add' | 'mult'
-
-export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase {
-  value?: FieldValue
-  feature_importance?: AggregationsInferenceFeatureImportance[]
-  top_classes?: AggregationsInferenceTopClassEntry[]
-  warning?: string
-}
-export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys
-& { [property: string]: any }
-
-export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase {
-  model_id: Name
-  inference_config?: AggregationsInferenceConfigContainer
-}
-
-export interface AggregationsInferenceClassImportance {
-  class_name: string
-  importance: double
-}
-
-export interface AggregationsInferenceConfigContainer {
-  regression?: MlRegressionInferenceOptions
-  classification?: MlClassificationInferenceOptions
-}
-
-export interface AggregationsInferenceFeatureImportance {
-  feature_name: string
-  importance?: double
-  classes?: AggregationsInferenceClassImportance[]
-}
-
-export interface AggregationsInferenceTopClassEntry {
-  class_name: FieldValue
-  class_probability: double
-  class_score: double
-}
-
-export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase<AggregationsIpPrefixBucket> {
-}
-
-export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase {
-  field: Field
-  prefix_length: integer
-  is_ipv6?: boolean
-  append_prefix_length?: boolean
-  keyed?: boolean
-  min_doc_count?: long
-}
-
-export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase {
-  is_ipv6: boolean
-  key: string
-  prefix_length: integer
-  netmask?: string
-}
-export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys
-& { [property: string]: AggregationsAggregate | boolean | string | integer | long }
-
-export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsIpRangeBucket> {
-}
-
-export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  ranges?: AggregationsIpRangeAggregationRange[]
-}
-
-export interface AggregationsIpRangeAggregationRange {
-  from?: string | null
-  mask?: string
-  to?: string | null
-}
-
-export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase {
-  key?: string
-  from?: string
-  to?: string
-}
-export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
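On the response side, `AggregationsAggregate` is a large closed union and `AggregationsBuckets<T>` is either a keyed record or an array depending on the request's `keyed` flag, so consumers typically narrow with a cast. A minimal sketch for histogram buckets (assuming the default, non-keyed array form):

```ts
import type {
  AggregationsHistogramAggregate,
  AggregationsHistogramBucket
} from '@elastic/elasticsearch/lib/api/types'

// Collect key -> doc_count from a histogram aggregation result.
function bucketCounts (agg: AggregationsHistogramAggregate): Map<number, number> {
  const out = new Map<number, number>()
  for (const bucket of agg.buckets as AggregationsHistogramBucket[]) {
    out.set(bucket.key, bucket.doc_count)
  }
  return out
}
```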
-
-export type AggregationsKeyedPercentiles = Record<string, string | long | null>
-
-export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'linear'
-  settings: EmptyObject
-}
-
-export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase<AggregationsLongRareTermsBucket> {
-}
-
-export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string }
-
-export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase<AggregationsLongTermsBucket> {
-}
-
-export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string }
-
-export interface AggregationsMatrixAggregation {
-  fields?: Fields
-  missing?: Record<Field, double>
-}
-
-export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase {
-  doc_count: long
-  fields?: AggregationsMatrixStatsFields[]
-}
-
-export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation {
-  mode?: SortMode
-}
-
-export interface AggregationsMatrixStatsFields {
-  name: Field
-  count: long
-  mean: double
-  variance: double
-  skewness: double
-  kurtosis: double
-  covariance: Record<Field, double>
-  correlation: Record<Field, double>
-}
-
-export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase {
-  compression?: double
-  execution_hint?: AggregationsTDigestExecutionHint
-}
-
-export interface AggregationsMetricAggregationBase {
-  field?: Field
-  missing?: AggregationsMissing
-  script?: Script | string
-}
-
-export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year'
-
-export type AggregationsMissing = string | integer | double | boolean
-
-export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  missing?: AggregationsMissing
-}
-
-export type AggregationsMissingOrder = 'first' | 'last' | 'default'
-
-export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation
-
-export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase {
-  minimize?: boolean
-  predict?: integer
-  window?: integer
-}
-
-export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase {
-  script?: string
-  shift?: integer
-  window?: integer
-}
-
-export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase {
-  window?: integer
-  shift?: integer
-  keyed?: boolean
-}
-
-export interface AggregationsMultiBucketAggregateBase<TBucket> extends AggregationsAggregateBase {
-  buckets: AggregationsBuckets<TBucket>
-}
-
-export interface AggregationsMultiBucketBase {
-  doc_count: long
-}
-
-export interface AggregationsMultiTermLookup {
-  field: Field
-  missing?: AggregationsMissing
-}
-
-export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase<AggregationsMultiTermsBucket> {
-}
-
-export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase {
-  collect_mode?: AggregationsTermsAggregationCollectMode
-  order?: AggregationsAggregateOrder
-  min_doc_count?: long
-  shard_min_doc_count?: long
-  shard_size?: integer
-  show_term_doc_count_error?: boolean
-  size?: integer
-  terms: AggregationsMultiTermLookup[]
-}
-
-export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: FieldValue[]
-  key_as_string?: string
-  doc_count_error_upper_bound?: long
-}
-export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys
-& { [property: string]: AggregationsAggregate | FieldValue[] | string | long }
-
-export interface AggregationsMutualInformationHeuristic {
-  background_is_superset?: boolean
-  include_negatives?: boolean
-}
-
-export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase {
-  path?: Field
-}
-
-export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase {
-  method?: AggregationsNormalizeMethod
-}
-
-export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax'
-
-export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsParentAggregate = AggregationsParentAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase {
-  type?: RelationName
-}
-
-export interface AggregationsPercentageScoreHeuristic {
-}
-
-export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase {
-  keyed?: boolean
-  values?: double[] | null
-  hdr?: AggregationsHdrMethod
-  tdigest?: AggregationsTDigest
-}
-
-export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[]
-
-export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase {
-  values: AggregationsPercentiles
-}
-
-export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase {
-  keyed?: boolean
-  percents?: double[]
-  hdr?: AggregationsHdrMethod
-  tdigest?: AggregationsTDigest
-}
-
-export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase {
-}
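Pipeline aggregations such as `moving_fn` reference their input series via `buckets_path` (the `AggregationsBucketsPath` alias) and tolerate gaps per `gap_policy`. A minimal request-side sketch (the field names are hypothetical; `MovingFunctions.unweightedAvg` is a built-in Painless helper):

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

const aggs: Record<string, AggregationsAggregationContainer> = {
  daily: {
    date_histogram: { field: 'timestamp', calendar_interval: 'day' },
    aggs: {
      sales: { sum: { field: 'amount' } },
      // 7-bucket moving average over the sibling `sales` metric;
      // note AggregationsMovingFunctionAggregation types `script` as a string.
      sales_7d: {
        moving_fn: {
          buckets_path: 'sales',
          window: 7,
          script: 'MovingFunctions.unweightedAvg(values)'
        }
      }
    }
  }
}
```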
-
-export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase {
-  percents?: double[]
-}
-
-export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation {
-  format?: string
-  gap_policy?: AggregationsGapPolicy
-}
-
-export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase {
-  probability: double
-  seed?: integer
-  shard_seed?: integer
-}
-
-export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsRangeBucket> {
-}
-
-export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase {
-  field?: Field
-  missing?: integer
-  ranges?: AggregationsAggregationRange[]
-  script?: Script | string
-  keyed?: boolean
-  format?: string
-}
-
-export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase {
-  from?: double
-  to?: double
-  from_as_string?: string
-  to_as_string?: string
-  key?: string
-}
-export type AggregationsRangeBucket = AggregationsRangeBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase {
-  exclude?: AggregationsTermsExclude
-  field?: Field
-  include?: AggregationsTermsInclude
-  max_doc_count?: long
-  missing?: AggregationsMissing
-  precision?: double
-  value_type?: string
-}
-
-export interface AggregationsRateAggregate extends AggregationsAggregateBase {
-  value: double
-  value_as_string?: string
-}
-
-export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase {
-  unit?: AggregationsCalendarInterval
-  mode?: AggregationsRateMode
-}
-
-export type AggregationsRateMode = 'sum' | 'value_count'
-
-export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase {
-  path?: Field
-}
-
-export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase {
-  shard_size?: integer
-}
-
-export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash'
-
-export interface AggregationsScriptedHeuristic {
-  script: Script | string
-}
-
-export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase {
-  value: any
-}
-
-export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase {
-  combine_script?: Script | string
-  init_script?: Script | string
-  map_script?: Script | string
-  params?: Record<string, any>
-  reduce_script?: Script | string
-}
-
-export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase {
-  lag?: integer
-}
-
-export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase<AggregationsSignificantLongTermsBucket> {
-}
-
-export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
-  key: long
-  key_as_string?: string
-}
-export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys
-& { [property: string]: AggregationsAggregate | long | string | double }
-
-export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase<AggregationsSignificantStringTermsBucket> {
-}
-
-export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase {
-  key: string
-}
-export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys
-& { [property: string]: AggregationsAggregate | string | double | long }
-
-export interface AggregationsSignificantTermsAggregateBase<T> extends AggregationsMultiBucketAggregateBase<T> {
-  bg_count?: long
-  doc_count?: long
-}
-
-export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase {
-  background_filter?: QueryDslQueryContainer
-  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: AggregationsTermsInclude
-  jlh?: EmptyObject
-  min_doc_count?: long
-  mutual_information?: AggregationsMutualInformationHeuristic
-  percentage?: AggregationsPercentageScoreHeuristic
-  script_heuristic?: AggregationsScriptedHeuristic
-  shard_min_doc_count?: long
-  shard_size?: integer
-  size?: integer
-}
-
-export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase {
-  score: double
-  bg_count: long
-}
-
-export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase {
-  background_filter?: QueryDslQueryContainer
-  chi_square?: AggregationsChiSquareHeuristic
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  filter_duplicate_text?: boolean
-  gnd?: AggregationsGoogleNormalizedDistanceHeuristic
-  include?: AggregationsTermsInclude
-  jlh?: EmptyObject
-  min_doc_count?: long
-  mutual_information?: AggregationsMutualInformationHeuristic
-  percentage?: AggregationsPercentageScoreHeuristic
-  script_heuristic?: AggregationsScriptedHeuristic
-  shard_min_doc_count?: long
-  shard_size?: integer
-  size?: integer
-  source_fields?: Fields
-}
-
-export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase {
-  model: 'simple'
-  settings: EmptyObject
-}
-
-export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase {
-  doc_count: long
-}
-
-export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase {
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsStandardDeviationBounds {
-  upper: double | null
-  lower: double | null
-  upper_population: double | null
-  lower_population: double | null
-  upper_sampling: double | null
-  lower_sampling: double | null
-}
-
-export interface AggregationsStandardDeviationBoundsAsString {
-  upper: string
-  lower: string
-  upper_population: string
-  lower_population: string
-  upper_sampling: string
-  lower_sampling: string
-}
-
-export interface AggregationsStatsAggregate extends AggregationsAggregateBase {
-  count: long
-  min: double | null
-  max: double | null
-  avg: double | null
-  sum: double
-  min_as_string?: string
-  max_as_string?: string
-  avg_as_string?: string
-  sum_as_string?: string
-}
-
-export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate {
-}
-
-export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase<AggregationsStringRareTermsBucket> {
-}
-
-export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase {
-  key: string
-}
-export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys
-& { [property: string]: AggregationsAggregate | string | long }
-
-export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase {
-  count: long
-  min_length: integer | null
-  max_length: integer | null
-  avg_length: double | null
-  entropy: double | null
-  distribution?: Record<string, double> | null
-  min_length_as_string?: string
-  max_length_as_string?: string
-  avg_length_as_string?: string
-}
-
-export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase {
-  show_distribution?: boolean
-}
-
-export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase<AggregationsStringTermsBucket> {
-}
-
-export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase {
-  key: FieldValue
-}
-export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys
-& { [property: string]: AggregationsAggregate | FieldValue | long }
-
-export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase {
-}
-
-export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase {
-}
-
-export interface AggregationsTDigest {
-  compression?: integer
-  execution_hint?: AggregationsTDigestExecutionHint
-}
-
-export type AggregationsTDigestExecutionHint = 'default' | 'high_accuracy'
-
-export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase {
-}
-
-export interface AggregationsTTestAggregate extends AggregationsAggregateBase {
-  value: double | null
-  value_as_string?: string
-}
-
-export interface AggregationsTTestAggregation {
-  a?: AggregationsTestPopulation
-  b?: AggregationsTestPopulation
-  type?: AggregationsTTestType
-}
-
-export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic'
-
-export interface AggregationsTermsAggregateBase<TBucket> extends AggregationsMultiBucketAggregateBase<TBucket> {
-  doc_count_error_upper_bound?: long
-  sum_other_doc_count?: long
-}
-
-export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase {
-  collect_mode?: AggregationsTermsAggregationCollectMode
-  exclude?: AggregationsTermsExclude
-  execution_hint?: AggregationsTermsAggregationExecutionHint
-  field?: Field
-  include?: AggregationsTermsInclude
-  min_doc_count?: integer
-  missing?: AggregationsMissing
-  missing_order?: AggregationsMissingOrder
-  missing_bucket?: boolean
-  value_type?: string
-  order?: AggregationsAggregateOrder
-  script?: Script | string
-  shard_min_doc_count?: long
-  shard_size?: integer
-  show_term_doc_count_error?: boolean
-  size?: integer
-  format?: string
-}
-
-export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first'
-
-export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality'
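`AggregationsAggregateOrder` (declared earlier in this hunk) is a `Partial<Record<Field, SortOrder>>` or an array of them, which is how multi-criteria terms ordering is written. A small request-side sketch with hypothetical field names:

```ts
import type {
  AggregationsAggregationContainer,
  AggregationsAggregateOrder
} from '@elastic/elasticsearch/lib/api/types'

// Order buckets by a sub-aggregation value, then break ties by key.
const order: AggregationsAggregateOrder = [{ avg_price: 'desc' }, { _key: 'asc' }]

const aggs: Record<string, AggregationsAggregationContainer> = {
  top_categories: {
    terms: { field: 'category.keyword', size: 5, order, collect_mode: 'breadth_first' },
    aggs: { avg_price: { avg: { field: 'price' } } }
  }
}
```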
-
-export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase {
-  doc_count_error_upper_bound?: long
-}
-
-export type AggregationsTermsExclude = string | string[]
-
-export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition
-
-export interface AggregationsTermsPartition {
-  num_partitions: long
-  partition: long
-}
-
-export interface AggregationsTestPopulation {
-  field: Field
-  script?: Script | string
-  filter?: QueryDslQueryContainer
-}
-
-export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase<AggregationsTimeSeriesBucket> {
-}
-
-export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase {
-  size?: integer
-  keyed?: boolean
-}
-
-export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase {
-  key: Record<Field, FieldValue>
-}
-export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys
-& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long }
-
-export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase {
-  hits: SearchHitsMetadata<any>
-}
-
-export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase {
-  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-  explain?: boolean
-  fields?: (QueryDslFieldAndFormat | Field)[]
-  from?: integer
-  highlight?: SearchHighlight
-  script_fields?: Record<string, ScriptField>
-  size?: integer
-  sort?: Sort
-  _source?: SearchSourceConfig
-  stored_fields?: Fields
-  track_scores?: boolean
-  version?: boolean
-  seq_no_primary_term?: boolean
-}
-
-export interface AggregationsTopMetrics {
-  sort: (FieldValue | null)[]
-  metrics: Record<string, FieldValue | null>
-}
-
-export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase {
-  top: AggregationsTopMetrics[]
-}
-
-export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase {
-  metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[]
-  size?: integer
-  sort?: Sort
-}
-
-export interface AggregationsTopMetricsValue {
-  field: Field
-}
-
-export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase<void> {
-}
-
-export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase {
-}
-export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys
-& { [property: string]: AggregationsAggregate | long | Metadata }
-
-export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase<void> {
-}
-
-export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase<void> {
-}
-
-export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation {
-}
-
-export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean'
-
-export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsVariableWidthHistogramBucket> {
-}
-
-export interface AggregationsVariableWidthHistogramAggregation {
-  field?: Field
-  buckets?: integer
-  shard_size?: integer
-  initial_buffer?: integer
-  script?: Script | string
-}
-
-export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase {
-  min: double
-  key: double
-  max: double
-  min_as_string?: string
-  key_as_string?: string
-  max_as_string?: string
-}
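`top_metrics` returns, per bucket, the metric values of the document that sorts first; `top` is an array because `size` may exceed 1. A minimal sketch of requesting and reading it (field names are hypothetical):

```ts
import type {
  AggregationsAggregationContainer,
  AggregationsTopMetricsAggregate
} from '@elastic/elasticsearch/lib/api/types'

const aggs: Record<string, AggregationsAggregationContainer> = {
  latest: {
    top_metrics: { metrics: [{ field: 'price' }], sort: { timestamp: 'desc' }, size: 1 }
  }
}

// metrics is a Record<string, FieldValue | null>, keyed by the requested field.
function latestPrice (agg: AggregationsTopMetricsAggregate) {
  return agg.top[0]?.metrics['price']
}
```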
-export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys
-& { [property: string]: AggregationsAggregate | double | string | long }
-
-export interface AggregationsWeightedAverageAggregation {
-  format?: string
-  value?: AggregationsWeightedAverageValue
-  value_type?: AggregationsValueType
-  weight?: AggregationsWeightedAverageValue
-}
-
-export interface AggregationsWeightedAverageValue {
-  field?: Field
-  missing?: double
-  script?: Script | string
-}
-
-export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase {
-}
-
-export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer
-
-export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase {
-  type: 'apostrophe'
-}
-
-export interface AnalysisArabicAnalyzer {
-  type: 'arabic'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'arabic_normalization'
-}
-
-export interface AnalysisArabicStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'arabic_stem'
-}
-
-export interface AnalysisArmenianAnalyzer {
-  type: 'armenian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase {
-  type: 'asciifolding'
-  preserve_original?: SpecUtilsStringified<boolean>
-}
-
-export interface AnalysisBasqueAnalyzer {
-  type: 'basque'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisBengaliAnalyzer {
-  type: 'bengali'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisBengaliNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'bengali_normalization'
-}
-
-export interface AnalysisBrazilianAnalyzer {
-  type: 'brazilian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisBrazilianStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'brazilian_stem'
-}
-
-export interface AnalysisBulgarianAnalyzer {
-  type: 'bulgarian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisCatalanAnalyzer {
-  type: 'catalan'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export type AnalysisCharFilter = string | AnalysisCharFilterDefinition
-
-export interface AnalysisCharFilterBase {
-  version?: VersionString
-}
-
-export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter
-
-export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase {
-  type: 'char_group'
-  tokenize_on_chars: string[]
-  max_token_length?: integer
-}
-
-export interface AnalysisChineseAnalyzer {
-  type: 'chinese'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisCjkAnalyzer {
-  type: 'cjk'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana'
-
-export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase {
-  type: 'cjk_bigram'
-  ignored_scripts?: AnalysisCjkBigramIgnoredScript[]
-  output_unigrams?: boolean
-}
-
-export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase {
-  type: 'cjk_width'
-}
-
-export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase {
-  type: 'classic'
-}
-
-export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase {
-  type: 'classic'
-  max_token_length?: integer
-}
-
-export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase {
-  type: 'common_grams'
-  common_words?: string[]
-  common_words_path?: string
-  ignore_case?: boolean
-  query_mode?: boolean
-}
-
-export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase {
-  max_subword_size?: integer
-  min_subword_size?: integer
-  min_word_size?: integer
-  only_longest_match?: boolean
-  word_list?: string[]
-  word_list_path?: string
-}
-
-export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase {
-  type: 'condition'
-  filter: string[]
-  script: Script | string
-}
-
-export interface AnalysisCustomAnalyzer {
-  type: 'custom'
-  char_filter?: string | string[]
-  filter?: string | string[]
-  position_increment_gap?: integer
-  position_offset_gap?: integer
-  tokenizer: string
-}
-
-export interface AnalysisCustomNormalizer {
-  type: 'custom'
-  char_filter?: string[]
-  filter?: string[]
-}
-
-export interface AnalysisCzechAnalyzer {
-  type: 'czech'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisCzechStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'czech_stem'
-}
-
-export interface AnalysisDanishAnalyzer {
-  type: 'danish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase {
-  type: 'decimal_digit'
-}
-
-export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity'
-
-export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase {
-  type: 'delimited_payload'
-  delimiter?: string
-  encoding?: AnalysisDelimitedPayloadEncoding
-}
-
-export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
-  type: 'dictionary_decompounder'
-}
-
-export interface AnalysisDutchAnalyzer {
-  type: 'dutch'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisDutchStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'dutch_stem'
-}
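The `Analysis*` types above describe index-settings components. `AnalysisCustomAnalyzer` references char filters, a tokenizer, and token filters by name; the named components are defined alongside it in the analysis settings. A minimal sketch of declaring one (the analyzer name is hypothetical; all referenced components are Elasticsearch built-ins):

```ts
import type { AnalysisCustomAnalyzer } from '@elastic/elasticsearch/lib/api/types'

// A custom analyzer that strips HTML, tokenizes on standard boundaries,
// then lowercases and ASCII-folds the tokens.
const my_analyzer: AnalysisCustomAnalyzer = {
  type: 'custom',
  char_filter: ['html_strip'],
  tokenizer: 'standard',
  filter: ['lowercase', 'asciifolding']
}
```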
-
-export type AnalysisEdgeNGramSide = 'front' | 'back'
-
-export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase {
-  type: 'edge_ngram'
-  max_gram?: integer
-  min_gram?: integer
-  side?: AnalysisEdgeNGramSide
-  preserve_original?: SpecUtilsStringified<boolean>
-}
-
-export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase {
-  type: 'edge_ngram'
-  custom_token_chars?: string
-  max_gram?: integer
-  min_gram?: integer
-  token_chars?: AnalysisTokenChar[]
-}
-
-export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase {
-  type: 'elision'
-  articles?: string[]
-  articles_path?: string
-  articles_case?: SpecUtilsStringified<boolean>
-}
-
-export interface AnalysisEnglishAnalyzer {
-  type: 'english'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisEstonianAnalyzer {
-  type: 'estonian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisFingerprintAnalyzer {
-  type: 'fingerprint'
-  version?: VersionString
-  max_output_size: integer
-  preserve_original: boolean
-  separator: string
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase {
-  type: 'fingerprint'
-  max_output_size?: integer
-  separator?: string
-}
-
-export interface AnalysisFinnishAnalyzer {
-  type: 'finnish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase {
-  type: 'flatten_graph'
-}
-
-export interface AnalysisFrenchAnalyzer {
-  type: 'french'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisFrenchStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'french_stem'
-}
-
-export interface AnalysisGalicianAnalyzer {
-  type: 'galician'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisGermanAnalyzer {
-  type: 'german'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'german_normalization'
-}
-
-export interface AnalysisGermanStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'german_stem'
-}
-
-export interface AnalysisGreekAnalyzer {
-  type: 'greek'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-}
-
-export interface AnalysisHindiAnalyzer {
-  type: 'hindi'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'hindi_normalization'
-}
-
-export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase {
-  type: 'html_strip'
-  escaped_tags?: string[]
-}
-
-export interface AnalysisHungarianAnalyzer {
-  type: 'hungarian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase {
-  type: 'hunspell'
-  dedup?: boolean
-  dictionary?: string
-  locale: string
-  lang: string
-  language: string
-  longest_only?: boolean
-}
-
-export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase {
-  type: 'hyphenation_decompounder'
-  hyphenation_patterns_path: string
-  no_sub_matches?: boolean
-  no_overlapping_matches?: boolean
-}
-
-export interface AnalysisIcuAnalyzer {
-  type: 'icu_analyzer'
-  method: AnalysisIcuNormalizationType
-  mode: AnalysisIcuNormalizationMode
-}
-
-export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable'
-
-export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper'
-
-export type AnalysisIcuCollationDecomposition = 'no' | 'identical'
-
-export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical'
-
-export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_collation'
-  alternate?: AnalysisIcuCollationAlternate
-  caseFirst?: AnalysisIcuCollationCaseFirst
-  caseLevel?: boolean
-  country?: string
-  decomposition?: AnalysisIcuCollationDecomposition
-  hiraganaQuaternaryMode?: boolean
-  language?: string
-  numeric?: boolean
-  rules?: string
-  strength?: AnalysisIcuCollationStrength
-  variableTop?: string
-  variant?: string
-}
-
-export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_folding'
-  unicode_set_filter: string
-}
-
-export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase {
-  type: 'icu_normalizer'
-  mode?: AnalysisIcuNormalizationMode
-  name?: AnalysisIcuNormalizationType
-  unicode_set_filter?: string
-}
-
-export type AnalysisIcuNormalizationMode = 'decompose' | 'compose'
-
-export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_normalizer'
-  name: AnalysisIcuNormalizationType
-}
-
-export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf'
-
-export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase {
-  type: 'icu_tokenizer'
-  rule_files: string
-}
-
-export type AnalysisIcuTransformDirection = 'forward' | 'reverse'
-
-export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase {
-  type: 'icu_transform'
-  dir?: AnalysisIcuTransformDirection
-  id: string
-}
-
-export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase {
-  type: 'indic_normalization'
-}
-
-export interface AnalysisIndonesianAnalyzer {
-  type: 'indonesian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisIrishAnalyzer {
-  type: 'irish'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisItalianAnalyzer {
-  type: 'italian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase {
-  type: 'ja_stop'
-  stopwords?: AnalysisStopWords
-}
-
-export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kstem'
-}
-
-export type AnalysisKeepTypesMode = 'include' | 'exclude'
-
-export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keep_types'
-  mode?: AnalysisKeepTypesMode
-  types: string[]
-}
-
-export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keep'
-  keep_words?: string[]
-  keep_words_case?: boolean
-  keep_words_path?: string
-}
-
-export interface AnalysisKeywordAnalyzer {
-  type: 'keyword'
-  version?: VersionString
-}
-
-export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keyword_marker'
-  ignore_case?: boolean
-  keywords?: string | string[]
-  keywords_path?: string
-  keywords_pattern?: string
-}
-
-export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase {
-  type: 'keyword_repeat'
-}
-
-export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase {
-  type: 'keyword'
-  buffer_size?: integer
-}
-
-export interface AnalysisKuromojiAnalyzer {
-  type: 'kuromoji'
-  mode: AnalysisKuromojiTokenizationMode
-  user_dictionary?: string
-}
-
-export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase {
-  type: 'kuromoji_iteration_mark'
-  normalize_kana: boolean
-  normalize_kanji: boolean
-}
-
-export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_part_of_speech'
-  stoptags: string[]
-}
-
-export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_readingform'
-  use_romaji: boolean
-}
-
-export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'kuromoji_stemmer'
-  minimum_length: integer
-}
-
-export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended'
-
-export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase {
-  type: 'kuromoji_tokenizer'
-  discard_punctuation?: boolean
-  mode: AnalysisKuromojiTokenizationMode
-  nbest_cost?: integer
-  nbest_examples?: string
-  user_dictionary?: string
-  user_dictionary_rules?: string[]
-  discard_compound_token?: boolean
-}
-
-export interface AnalysisLatvianAnalyzer {
-  type: 'latvian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase {
-  type: 'length'
-  max?: integer
-  min?: integer
-}
-
-export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase {
-  type: 'letter'
-}
-
-export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase {
-  type: 'limit'
-  consume_all_tokens?: boolean
-  max_token_count?: SpecUtilsStringified<integer>
-}
-
-export interface AnalysisLithuanianAnalyzer {
-  type: 'lithuanian'
-  stopwords?: AnalysisStopWords
-  stopwords_path?: string
-  stem_exclusion?: string[]
-}
-
-export interface AnalysisLowercaseNormalizer {
-  type: 'lowercase'
-}
-
-export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase {
-  type: 'lowercase'
-  language?: AnalysisLowercaseTokenFilterLanguages
-}
-
-export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish'
-
-export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase {
-  type: 'lowercase'
-}
-
-export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase {
-  type: 'mapping'
-  mappings?: string[]
-  mappings_path?: string
-}
-
-export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase {
-  type: 'min_hash'
-  bucket_count?: integer
-  hash_count?: integer
-  hash_set_size?: integer
-  with_rotation?: boolean
-}
-
-export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase {
-  type: 'multiplexer'
-  filters: string[]
-  preserve_original?: SpecUtilsStringified<boolean>
-}
-
-export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase {
-  type: 'ngram'
-  max_gram?: integer
-  min_gram?: integer
-  preserve_original?: SpecUtilsStringified<boolean>
-}
-
-export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase {
-  type: 'ngram'
-  custom_token_chars?: string
-  max_gram?: integer
-  min_gram?: integer
-  token_chars?: AnalysisTokenChar[]
-}
-
-export interface AnalysisNoriAnalyzer {
-  type: 'nori'
-  version?: VersionString
-  decompound_mode?:
AnalysisNoriDecompoundMode - stoptags?: string[] - user_dictionary?: string -} - -export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' - -export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { - type: 'nori_part_of_speech' - stoptags?: string[] -} - -export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { - type: 'nori_tokenizer' - decompound_mode?: AnalysisNoriDecompoundMode - discard_punctuation?: boolean - user_dictionary?: string - user_dictionary_rules?: string[] -} - -export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer - -export interface AnalysisNorwegianAnalyzer { - type: 'norwegian' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { - type: 'path_hierarchy' - buffer_size?: SpecUtilsStringified - delimiter?: string - replacement?: string - reverse?: SpecUtilsStringified - skip?: SpecUtilsStringified -} - -export interface AnalysisPatternAnalyzer { - type: 'pattern' - version?: VersionString - flags?: string - lowercase?: boolean - pattern: string - stopwords?: AnalysisStopWords -} - -export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { - type: 'pattern_capture' - patterns: string[] - preserve_original?: SpecUtilsStringified -} - -export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { - type: 'pattern_replace' - flags?: string - pattern: string - replacement?: string -} - -export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { - type: 'pattern_replace' - all?: boolean - flags?: string - pattern: string - replacement?: string -} - -export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { - type: 'pattern' - flags?: string - group?: integer - pattern?: string -} - -export interface AnalysisPersianAnalyzer { - type: 'persian' - stopwords?: AnalysisStopWords - stopwords_path?: string -} - -export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase { - type: 'persian_normalization' -} - -export interface AnalysisPersianStemTokenFilter extends AnalysisTokenFilterBase { - type: 'persian_stem' -} - -export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' - -export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' - -export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic' - -export type AnalysisPhoneticRuleType = 'approx' | 'exact' - -export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase { - type: 'phonetic' - encoder: AnalysisPhoneticEncoder - languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[] - max_code_len?: integer - name_type?: AnalysisPhoneticNameType - replace?: boolean - rule_type?: AnalysisPhoneticRuleType -} - -export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { - type: 'porter_stem' -} - -export interface AnalysisPortugueseAnalyzer { - type: 'portuguese' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { - type: 
'predicate_token_filter' - script: Script | string -} - -export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { - type: 'remove_duplicates' -} - -export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { - type: 'reverse' -} - -export interface AnalysisRomanianAnalyzer { - type: 'romanian' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisRussianAnalyzer { - type: 'russian' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisRussianStemTokenFilter extends AnalysisTokenFilterBase { - type: 'russian_stem' -} - -export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { - type: 'scandinavian_folding' -} - -export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase { - type: 'scandinavian_normalization' -} - -export interface AnalysisSerbianAnalyzer { - type: 'serbian' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase { - type: 'serbian_normalization' -} - -export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { - type: 'shingle' - filler_token?: string - max_shingle_size?: SpecUtilsStringified - min_shingle_size?: SpecUtilsStringified - output_unigrams?: boolean - output_unigrams_if_no_shingles?: boolean - token_separator?: string -} - -export interface AnalysisSimpleAnalyzer { - type: 'simple' - version?: VersionString -} - -export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase { - type: 'simple_pattern_split' - pattern?: string -} - -export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase { - type: 'simple_pattern' - pattern?: string -} - -export interface AnalysisSnowballAnalyzer { - type: 'snowball' - version?: VersionString - language: AnalysisSnowballLanguage - stopwords?: AnalysisStopWords -} - -export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish' - -export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { - type: 'snowball' - language?: AnalysisSnowballLanguage -} - -export interface AnalysisSoraniAnalyzer { - type: 'sorani' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase { - type: 'sorani_normalization' -} - -export interface AnalysisSpanishAnalyzer { - type: 'spanish' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisStandardAnalyzer { - type: 'standard' - max_token_length?: integer - stopwords?: AnalysisStopWords -} - -export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { - type: 'standard' - max_token_length?: integer -} - -export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase { - type: 'stemmer_override' - rules?: string[] - rules_path?: string -} - -export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { - type: 'stemmer' - language?: string - 
name?: string -} - -export interface AnalysisStopAnalyzer { - type: 'stop' - version?: VersionString - stopwords?: AnalysisStopWords - stopwords_path?: string -} - -export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { - type: 'stop' - ignore_case?: boolean - remove_trailing?: boolean - stopwords?: AnalysisStopWords - stopwords_path?: string -} - -export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_' - -export type AnalysisStopWords = AnalysisStopWordLanguage | string[] - -export interface AnalysisSwedishAnalyzer { - type: 'swedish' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export type AnalysisSynonymFormat = 'solr' | 'wordnet' - -export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase { - type: 'synonym_graph' -} - -export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase { - type: 'synonym' -} - -export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase { - expand?: boolean - format?: AnalysisSynonymFormat - lenient?: boolean - synonyms?: string[] - synonyms_path?: string - synonyms_set?: string - tokenizer?: string - updateable?: boolean -} - -export interface AnalysisThaiAnalyzer { - type: 'thai' - stopwords?: AnalysisStopWords - stopwords_path?: string -} - -export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase { - type: 'thai' -} - -export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' - -export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition - -export interface AnalysisTokenFilterBase { - version?: VersionString -} - -export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicStemTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisBengaliNormalizationTokenFilter | AnalysisBrazilianStemTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisCzechStemTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisDutchStemTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisFrenchStemTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisGermanStemTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | 
AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPersianStemTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisRussianStemTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter - -export type AnalysisTokenizer = string | AnalysisTokenizerDefinition - -export interface AnalysisTokenizerBase { - version?: VersionString -} - -export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer - -export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { - type: 'trim' -} - -export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { - type: 'truncate' - length?: integer -} - -export interface AnalysisTurkishAnalyzer { - type: 'turkish' - stopwords?: AnalysisStopWords - stopwords_path?: string - stem_exclusion?: string[] -} - -export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { - type: 'uax_url_email' - max_token_length?: integer -} - -export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { - type: 'unique' - only_on_same_position?: boolean -} - -export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { - type: 'uppercase' -} - -export interface AnalysisWhitespaceAnalyzer { - type: 'whitespace' - version?: VersionString -} - -export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { - type: 'whitespace' - max_token_length?: integer -} - -export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase { - type: 'word_delimiter_graph' - adjust_offsets?: boolean - ignore_keywords?: boolean -} - -export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase { - type: 'word_delimiter' -} - -export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase { - catenate_all?: boolean - catenate_numbers?: boolean - catenate_words?: boolean - generate_number_parts?: boolean - generate_word_parts?: boolean - preserve_original?: SpecUtilsStringified - protected_words?: string[] - protected_words_path?: string - 
split_on_case_change?: boolean - split_on_numerics?: boolean - stem_english_possessive?: boolean - type_table?: string[] - type_table_path?: string -} - -export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { - type: 'aggregate_metric_double' - default_metric: string - ignore_malformed?: boolean - metrics: string[] - time_series_metric?: MappingTimeSeriesMetricType -} - -export interface MappingAllField { - analyzer: string - enabled: boolean - omit_norms: boolean - search_analyzer: string - similarity: string - store: boolean - store_term_vector_offsets: boolean - store_term_vector_payloads: boolean - store_term_vector_positions: boolean - store_term_vectors: boolean -} - -export interface MappingBinaryProperty extends MappingDocValuesPropertyBase { - type: 'binary' -} - -export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { - boost?: double - fielddata?: IndicesNumericFielddata - index?: boolean - null_value?: boolean - ignore_malformed?: boolean - script?: Script | string - on_script_error?: MappingOnScriptError - time_series_dimension?: boolean - type: 'boolean' -} - -export interface MappingByteNumberProperty extends MappingNumberPropertyBase { - type: 'byte' - null_value?: byte -} - -export interface MappingChunkingSettings { - strategy: string - max_chunk_size: integer - overlap?: integer - sentence_overlap?: integer -} - -export interface MappingCompletionProperty extends MappingDocValuesPropertyBase { - analyzer?: string - contexts?: MappingSuggestContext[] - max_input_length?: integer - preserve_position_increments?: boolean - preserve_separators?: boolean - search_analyzer?: string - type: 'completion' -} - -export interface MappingCompositeSubField { - type: MappingRuntimeFieldType -} - -export interface MappingConstantKeywordProperty extends MappingPropertyBase { - value?: any - type: 'constant_keyword' -} - -export interface MappingCorePropertyBase extends MappingPropertyBase { - copy_to?: Fields - store?: boolean -} - -export interface MappingCountedKeywordProperty extends MappingPropertyBase { - type: 'counted_keyword' - index?: boolean -} - -export interface MappingDataStreamTimestamp { - enabled: boolean -} - -export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { - boost?: double - format?: string - ignore_malformed?: boolean - index?: boolean - script?: Script | string - on_script_error?: MappingOnScriptError - null_value?: DateTime - precision_step?: integer - type: 'date_nanos' -} - -export interface MappingDateProperty extends MappingDocValuesPropertyBase { - boost?: double - fielddata?: IndicesNumericFielddata - format?: string - ignore_malformed?: boolean - index?: boolean - script?: Script | string - on_script_error?: MappingOnScriptError - null_value?: DateTime - precision_step?: integer - locale?: string - type: 'date' -} - -export interface MappingDateRangeProperty extends MappingRangePropertyBase { - format?: string - type: 'date_range' -} - -export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float' - -export interface MappingDenseVectorIndexOptions { - confidence_interval?: float - ef_construction?: integer - m?: integer - type: MappingDenseVectorIndexOptionsType -} - -export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' - -export interface MappingDenseVectorProperty extends MappingPropertyBase { - type: 'dense_vector' - dims?: integer - element_type?: 
MappingDenseVectorElementType - index?: boolean - index_options?: MappingDenseVectorIndexOptions - similarity?: MappingDenseVectorSimilarity -} - -export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product' - -export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { - doc_values?: boolean -} - -export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase { - type: 'double' - null_value?: double -} - -export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { - type: 'double_range' -} - -export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' - -export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { - type: '{dynamic_type}' - enabled?: boolean - null_value?: FieldValue - boost?: double - coerce?: boolean - script?: Script | string - on_script_error?: MappingOnScriptError - ignore_malformed?: boolean - time_series_metric?: MappingTimeSeriesMetricType - analyzer?: string - eager_global_ordinals?: boolean - index?: boolean - index_options?: MappingIndexOptions - index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes | null - norms?: boolean - position_increment_gap?: integer - search_analyzer?: string - search_quote_analyzer?: string - term_vector?: MappingTermVectorOption - format?: string - precision_step?: integer - locale?: string -} - -export interface MappingDynamicTemplate { - mapping?: MappingProperty - runtime?: MappingRuntimeField - match?: string | string[] - path_match?: string | string[] - unmatch?: string | string[] - path_unmatch?: string | string[] - match_mapping_type?: string | string[] - unmatch_mapping_type?: string | string[] - match_pattern?: MappingMatchType -} - -export interface MappingFieldAliasProperty extends MappingPropertyBase { - path?: Field - type: 'alias' -} - -export interface MappingFieldMapping { - full_name: string - mapping: Partial> -} - -export interface MappingFieldNamesField { - enabled: boolean -} - -export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'counted_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' - -export interface MappingFlattenedProperty extends MappingPropertyBase { - boost?: double - depth_limit?: integer - doc_values?: boolean - eager_global_ordinals?: boolean - index?: boolean - index_options?: MappingIndexOptions - null_value?: string - similarity?: string - split_queries_on_whitespace?: boolean - time_series_dimensions?: string[] - type: 'flattened' -} - -export interface MappingFloatNumberProperty extends MappingNumberPropertyBase { - type: 'float' - null_value?: float -} - -export interface MappingFloatRangeProperty extends MappingRangePropertyBase { - type: 'float_range' -} - -export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' - -export type MappingGeoPointMetricType = 'gauge' | 'counter' | 
'position' - -export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { - ignore_malformed?: boolean - ignore_z_value?: boolean - null_value?: GeoLocation - index?: boolean - on_script_error?: MappingOnScriptError - script?: Script | string - type: 'geo_point' - time_series_metric?: MappingGeoPointMetricType -} - -export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { - coerce?: boolean - ignore_malformed?: boolean - ignore_z_value?: boolean - index?: boolean - orientation?: MappingGeoOrientation - strategy?: MappingGeoStrategy - type: 'geo_shape' -} - -export type MappingGeoStrategy = 'recursive' | 'term' - -export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase { - type: 'half_float' - null_value?: float -} - -export interface MappingHistogramProperty extends MappingPropertyBase { - ignore_malformed?: boolean - type: 'histogram' -} - -export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase { - type: 'icu_collation_keyword' - norms?: boolean - index_options?: MappingIndexOptions - index?: boolean - null_value?: string - rules?: string - language?: string - country?: string - variant?: string - strength?: AnalysisIcuCollationStrength - decomposition?: AnalysisIcuCollationDecomposition - alternate?: AnalysisIcuCollationAlternate - case_level?: boolean - case_first?: AnalysisIcuCollationCaseFirst - numeric?: boolean - variable_top?: string - hiragana_quaternary_mode?: boolean -} - -export interface MappingIndexField { - enabled: boolean -} - -export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' - -export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase { - type: 'integer' - null_value?: integer -} - -export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { - type: 'integer_range' -} - -export interface MappingIpProperty extends MappingDocValuesPropertyBase { - boost?: double - index?: boolean - ignore_malformed?: boolean - null_value?: string - on_script_error?: MappingOnScriptError - script?: Script | string - time_series_dimension?: boolean - type: 'ip' -} - -export interface MappingIpRangeProperty extends MappingRangePropertyBase { - type: 'ip_range' -} - -export interface MappingJoinProperty extends MappingPropertyBase { - relations?: Record - eager_global_ordinals?: boolean - type: 'join' -} - -export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { - boost?: double - eager_global_ordinals?: boolean - index?: boolean - index_options?: MappingIndexOptions - script?: Script | string - on_script_error?: MappingOnScriptError - normalizer?: string - norms?: boolean - null_value?: string - similarity?: string | null - split_queries_on_whitespace?: boolean - time_series_dimension?: boolean - type: 'keyword' -} - -export interface MappingLongNumberProperty extends MappingNumberPropertyBase { - type: 'long' - null_value?: long -} - -export interface MappingLongRangeProperty extends MappingRangePropertyBase { - type: 'long_range' -} - -export interface MappingMatchOnlyTextProperty { - type: 'match_only_text' - fields?: Record - meta?: Record - copy_to?: Fields -} - -export type MappingMatchType = 'simple' | 'regex' - -export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase { - type: 'murmur3' -} - -export interface MappingNestedProperty extends MappingCorePropertyBase { - enabled?: boolean - include_in_parent?: boolean - include_in_root?: boolean - type: 'nested' -} - -export 
interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { - boost?: double - coerce?: boolean - ignore_malformed?: boolean - index?: boolean - on_script_error?: MappingOnScriptError - script?: Script | string - time_series_metric?: MappingTimeSeriesMetricType - time_series_dimension?: boolean -} - -export interface MappingObjectProperty extends MappingCorePropertyBase { - enabled?: boolean - subobjects?: MappingSubobjects - type?: 'object' -} - -export type MappingOnScriptError = 'fail' | 'continue' - -export interface MappingPassthroughObjectProperty extends MappingCorePropertyBase { - type?: 'passthrough' - enabled?: boolean - priority?: integer - time_series_dimension?: boolean -} - -export interface MappingPercolatorProperty extends MappingPropertyBase { - type: 'percolator' -} - -export interface MappingPointProperty extends MappingDocValuesPropertyBase { - ignore_malformed?: boolean - ignore_z_value?: boolean - null_value?: string - type: 'point' -} - -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty - -export interface MappingPropertyBase { - meta?: Record - properties?: Record - ignore_above?: integer - dynamic?: MappingDynamicMapping - fields?: Record - synthetic_source_keep?: MappingSyntheticSourceKeepEnum -} - -export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { - boost?: double - coerce?: boolean - index?: boolean -} - -export interface MappingRankFeatureProperty extends MappingPropertyBase { - positive_score_impact?: boolean - type: 'rank_feature' -} - -export interface MappingRankFeaturesProperty extends MappingPropertyBase { - positive_score_impact?: boolean - type: 'rank_features' -} - -export type MappingRankVectorElementType = 'byte' | 'float' | 'bit' - -export interface MappingRankVectorProperty extends MappingPropertyBase { - type: 'rank_vectors' - element_type?: MappingRankVectorElementType - dims?: integer -} - -export interface MappingRoutingField { - required: boolean -} - -export interface MappingRuntimeField { - fields?: Record - fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] - format?: string - 
input_field?: Field - target_field?: Field - target_index?: IndexName - script?: Script | string - type: MappingRuntimeFieldType -} - -export interface MappingRuntimeFieldFetchFields { - field: Field - format?: string -} - -export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup' - -export type MappingRuntimeFields = Record - -export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { - type: 'scaled_float' - null_value?: double - scaling_factor?: double -} - -export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { - analyzer?: string - index?: boolean - index_options?: MappingIndexOptions - max_shingle_size?: integer - norms?: boolean - search_analyzer?: string - search_quote_analyzer?: string - similarity?: string | null - term_vector?: MappingTermVectorOption - type: 'search_as_you_type' -} - -export interface MappingSemanticTextIndexOptions { - dense_vector?: MappingDenseVectorIndexOptions -} - -export interface MappingSemanticTextProperty { - type: 'semantic_text' - meta?: Record - inference_id?: Id - search_inference_id?: Id - index_options?: MappingSemanticTextIndexOptions - chunking_settings?: MappingChunkingSettings -} - -export interface MappingShapeProperty extends MappingDocValuesPropertyBase { - coerce?: boolean - ignore_malformed?: boolean - ignore_z_value?: boolean - orientation?: MappingGeoOrientation - type: 'shape' -} - -export interface MappingShortNumberProperty extends MappingNumberPropertyBase { - type: 'short' - null_value?: short -} - -export interface MappingSizeField { - enabled: boolean -} - -export interface MappingSourceField { - compress?: boolean - compress_threshold?: string - enabled?: boolean - excludes?: string[] - includes?: string[] - mode?: MappingSourceFieldMode -} - -export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' - -export interface MappingSparseVectorIndexOptions { - prune?: boolean - pruning_config?: TokenPruningConfig -} - -export interface MappingSparseVectorProperty extends MappingPropertyBase { - store?: boolean - type: 'sparse_vector' - index_options?: MappingSparseVectorIndexOptions -} - -export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' - -export interface MappingSuggestContext { - name: Name - path?: Field - type: string - precision?: integer | string -} - -export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all' - -export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' - -export interface MappingTextIndexPrefixes { - max_chars: integer - min_chars: integer -} - -export interface MappingTextProperty extends MappingCorePropertyBase { - analyzer?: string - boost?: double - eager_global_ordinals?: boolean - fielddata?: boolean - fielddata_frequency_filter?: IndicesFielddataFrequencyFilter - index?: boolean - index_options?: MappingIndexOptions - index_phrases?: boolean - index_prefixes?: MappingTextIndexPrefixes | null - norms?: boolean - position_increment_gap?: integer - search_analyzer?: string - search_quote_analyzer?: string - similarity?: string | null - term_vector?: MappingTermVectorOption - type: 'text' -} - -export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position' - -export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { - analyzer?: string - 
boost?: double - index?: boolean - null_value?: double - enable_position_increments?: boolean - type: 'token_count' -} - -export interface MappingTypeMapping { - all_field?: MappingAllField - date_detection?: boolean - dynamic?: MappingDynamicMapping - dynamic_date_formats?: string[] - dynamic_templates?: Partial>[] - _field_names?: MappingFieldNamesField - index_field?: MappingIndexField - _meta?: Metadata - numeric_detection?: boolean - properties?: Record - _routing?: MappingRoutingField - _size?: MappingSizeField - _source?: MappingSourceField - runtime?: Record - enabled?: boolean - subobjects?: MappingSubobjects - _data_stream_timestamp?: MappingDataStreamTimestamp -} - -export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { - type: 'unsigned_long' - null_value?: ulong -} - -export interface MappingVersionProperty extends MappingDocValuesPropertyBase { - type: 'version' -} - -export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { - type: 'wildcard' - null_value?: string -} - -export interface QueryDslBoolQuery extends QueryDslQueryBase { - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - minimum_should_match?: MinimumShouldMatch - must?: QueryDslQueryContainer | QueryDslQueryContainer[] - must_not?: QueryDslQueryContainer | QueryDslQueryContainer[] - should?: QueryDslQueryContainer | QueryDslQueryContainer[] -} - -export interface QueryDslBoostingQuery extends QueryDslQueryBase { - negative_boost: double - negative: QueryDslQueryContainer - positive: QueryDslQueryContainer -} - -export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' - -export type QueryDslCombinedFieldsOperator = 'or' | 'and' - -export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { - fields: Field[] - query: string - auto_generate_synonyms_phrase_query?: boolean - operator?: QueryDslCombinedFieldsOperator - minimum_should_match?: MinimumShouldMatch - zero_terms_query?: QueryDslCombinedFieldsZeroTerms -} - -export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all' - -export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { - analyzer?: string - cutoff_frequency?: double - high_freq_operator?: QueryDslOperator - low_freq_operator?: QueryDslOperator - minimum_should_match?: MinimumShouldMatch - query: string -} - -export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { - filter: QueryDslQueryContainer -} - -export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { -} -export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } - -export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { -} - -export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { - format?: DateFormat - time_zone?: TimeZone -} - -export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction - -export interface QueryDslDecayFunctionBase { - multi_value_mode?: QueryDslMultiValueMode -} - -export interface QueryDslDecayPlacement { - decay?: double - offset?: TScale - scale?: TScale - origin?: TOrigin -} - -export interface QueryDslDisMaxQuery extends QueryDslQueryBase { - queries: QueryDslQueryContainer[] - tie_breaker?: double -} - -export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | 
QueryDslDateDistanceFeatureQuery - -export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { - origin: TOrigin - pivot: TDistance - field: Field -} - -export interface QueryDslExistsQuery extends QueryDslQueryBase { - field: Field -} - -export interface QueryDslFieldAndFormat { - field: Field - format?: string - include_unmapped?: boolean -} - -export interface QueryDslFieldLookup { - id: Id - index?: IndexName - path?: Field - routing?: Routing -} - -export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' - -export interface QueryDslFieldValueFactorScoreFunction { - field: Field - factor?: double - missing?: double - modifier?: QueryDslFieldValueFactorModifier -} - -export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' - -export interface QueryDslFunctionScoreContainer { - exp?: QueryDslDecayFunction - gauss?: QueryDslDecayFunction - linear?: QueryDslDecayFunction - field_value_factor?: QueryDslFieldValueFactorScoreFunction - random_score?: QueryDslRandomScoreFunction - script_score?: QueryDslScriptScoreFunction - filter?: QueryDslQueryContainer - weight?: double -} - -export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' - -export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { - boost_mode?: QueryDslFunctionBoostMode - functions?: QueryDslFunctionScoreContainer[] - max_boost?: double - min_score?: double - query?: QueryDslQueryContainer - score_mode?: QueryDslFunctionScoreMode -} - -export interface QueryDslFuzzyQuery extends QueryDslQueryBase { - max_expansions?: integer - prefix_length?: integer - rewrite?: MultiTermQueryRewrite - transpositions?: boolean - fuzziness?: Fuzziness - value: string | double | boolean -} - -export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { - type?: QueryDslGeoExecution - validation_method?: QueryDslGeoValidationMethod - ignore_unmapped?: boolean -} -export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys -& { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } - -export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { -} -export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } - -export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { -} - -export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { - distance: Distance - distance_type?: GeoDistanceType - validation_method?: QueryDslGeoValidationMethod - ignore_unmapped?: boolean -} -export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys -& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string } - -export type QueryDslGeoExecution = 'memory' | 'indexed' - -export interface QueryDslGeoGridQuery extends QueryDslQueryBase { - geotile?: GeoTile - geohash?: GeoHash - geohex?: GeoHexCell -} - -export interface QueryDslGeoPolygonPoints { - points: GeoLocation[] -} - -export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { - validation_method?: QueryDslGeoValidationMethod - ignore_unmapped?: boolean -} -export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys -& { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean 
| float | string } - -export interface QueryDslGeoShapeFieldQuery { - shape?: GeoShape - indexed_shape?: QueryDslFieldLookup - relation?: GeoShapeRelation -} - -export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { - ignore_unmapped?: boolean -} -export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys -& { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string } - -export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' - -export interface QueryDslHasChildQuery extends QueryDslQueryBase { - ignore_unmapped?: boolean - inner_hits?: SearchInnerHits - max_children?: integer - min_children?: integer - query: QueryDslQueryContainer - score_mode?: QueryDslChildScoreMode - type: RelationName -} - -export interface QueryDslHasParentQuery extends QueryDslQueryBase { - ignore_unmapped?: boolean - inner_hits?: SearchInnerHits - parent_type: RelationName - query: QueryDslQueryContainer - score?: boolean -} - -export interface QueryDslIdsQuery extends QueryDslQueryBase { - values?: Ids -} - -export interface QueryDslIntervalsAllOf { - intervals: QueryDslIntervalsContainer[] - max_gaps?: integer - ordered?: boolean - filter?: QueryDslIntervalsFilter -} - -export interface QueryDslIntervalsAnyOf { - intervals: QueryDslIntervalsContainer[] - filter?: QueryDslIntervalsFilter -} - -export interface QueryDslIntervalsContainer { - all_of?: QueryDslIntervalsAllOf - any_of?: QueryDslIntervalsAnyOf - fuzzy?: QueryDslIntervalsFuzzy - match?: QueryDslIntervalsMatch - prefix?: QueryDslIntervalsPrefix - range?: QueryDslIntervalsRange - regexp?: QueryDslIntervalsRegexp - wildcard?: QueryDslIntervalsWildcard -} - -export interface QueryDslIntervalsFilter { - after?: QueryDslIntervalsContainer - before?: QueryDslIntervalsContainer - contained_by?: QueryDslIntervalsContainer - containing?: QueryDslIntervalsContainer - not_contained_by?: QueryDslIntervalsContainer - not_containing?: QueryDslIntervalsContainer - not_overlapping?: QueryDslIntervalsContainer - overlapping?: QueryDslIntervalsContainer - script?: Script | string -} - -export interface QueryDslIntervalsFuzzy { - analyzer?: string - fuzziness?: Fuzziness - prefix_length?: integer - term: string - transpositions?: boolean - use_field?: Field -} - -export interface QueryDslIntervalsMatch { - analyzer?: string - max_gaps?: integer - ordered?: boolean - query: string - use_field?: Field - filter?: QueryDslIntervalsFilter -} - -export interface QueryDslIntervalsPrefix { - analyzer?: string - prefix: string - use_field?: Field -} - -export interface QueryDslIntervalsQuery extends QueryDslQueryBase { - all_of?: QueryDslIntervalsAllOf - any_of?: QueryDslIntervalsAnyOf - fuzzy?: QueryDslIntervalsFuzzy - match?: QueryDslIntervalsMatch - prefix?: QueryDslIntervalsPrefix - range?: QueryDslIntervalsRange - regexp?: QueryDslIntervalsRegexp - wildcard?: QueryDslIntervalsWildcard -} - -export interface QueryDslIntervalsRange { - analyzer?: string - gte?: string - gt?: string - lte?: string - lt?: string - use_field?: Field -} - -export interface QueryDslIntervalsRegexp { - analyzer?: string - pattern: string - use_field?: Field -} - -export interface QueryDslIntervalsWildcard { - analyzer?: string - pattern: string - use_field?: Field -} - -export type QueryDslLike = string | QueryDslLikeDocument - -export interface QueryDslLikeDocument { - doc?: any - fields?: Field[] - _id?: Id - _index?: IndexName - per_field_analyzer?: Record - routing?: Routing - version?: VersionNumber - version_type?: VersionType -} - 
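As a usage sketch: the QueryDslLike union above accepts either free text or a QueryDslLikeDocument that references an indexed document. A minimal, hedged example of how this shapes a more_like_this query in application code — the import path follows the client's public type export, and the index name, id, and field names are illustrative assumptions, not part of this diff:

import type { QueryDslMoreLikeThisQuery } from '@elastic/elasticsearch/lib/api/types'

const mlt: QueryDslMoreLikeThisQuery = {
  fields: ['title', 'description'],   // hypothetical fields
  like: [
    'ergonomic keyboard',             // free-text form of QueryDslLike
    { _index: 'products', _id: '42' } // document form (QueryDslLikeDocument)
  ],
  min_term_freq: 1,
  max_query_terms: 12
}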
-export interface QueryDslMatchAllQuery extends QueryDslQueryBase { -} - -export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { - analyzer?: string - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: QueryDslOperator - prefix_length?: integer - query: string -} - -export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { -} - -export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { - analyzer?: string - max_expansions?: integer - query: string - slop?: integer - zero_terms_query?: QueryDslZeroTermsQuery -} - -export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { - analyzer?: string - query: string - slop?: integer - zero_terms_query?: QueryDslZeroTermsQuery -} - -export interface QueryDslMatchQuery extends QueryDslQueryBase { - analyzer?: string - auto_generate_synonyms_phrase_query?: boolean - cutoff_frequency?: double - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: QueryDslOperator - prefix_length?: integer - query: string | float | boolean - zero_terms_query?: QueryDslZeroTermsQuery -} - -export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { - analyzer?: string - boost_terms?: double - fail_on_unsupported_field?: boolean - fields?: Field[] - include?: boolean - like: QueryDslLike | QueryDslLike[] - max_doc_freq?: integer - max_query_terms?: integer - max_word_length?: integer - min_doc_freq?: integer - minimum_should_match?: MinimumShouldMatch - min_term_freq?: integer - min_word_length?: integer - routing?: Routing - stop_words?: AnalysisStopWords - unlike?: QueryDslLike | QueryDslLike[] - version?: VersionNumber - version_type?: VersionType -} - -export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { - analyzer?: string - auto_generate_synonyms_phrase_query?: boolean - cutoff_frequency?: double - fields?: Fields - fuzziness?: Fuzziness - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_expansions?: integer - minimum_should_match?: MinimumShouldMatch - operator?: QueryDslOperator - prefix_length?: integer - query: string - slop?: integer - tie_breaker?: double - type?: QueryDslTextQueryType - zero_terms_query?: QueryDslZeroTermsQuery -} - -export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' - -export interface QueryDslNestedQuery extends QueryDslQueryBase { - ignore_unmapped?: boolean - inner_hits?: SearchInnerHits - path: Field - query: QueryDslQueryContainer - score_mode?: QueryDslChildScoreMode -} - -export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { -} - -export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { -} -export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys -& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } - -export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' - -export interface QueryDslParentIdQuery extends QueryDslQueryBase { - id?: Id - ignore_unmapped?: boolean - type?: RelationName -} - -export interface QueryDslPercolateQuery extends QueryDslQueryBase { - document?: any - documents?: any[] - field: Field - id?: Id - index?: IndexName - name?: string - preference?: string - routing?: Routing - version?: VersionNumber -} - 
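The percolate clause typed above inverts the usual search flow: stored percolator queries are matched against an ad-hoc document. A minimal sketch under assumed names — a hypothetical index 'my-percolators' whose 'query' field is mapped as type 'percolator', with a placeholder node URL:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function findMatchingQueries (): Promise<void> {
  // Runs every stored percolator query in the index against this document.
  const res = await client.search({
    index: 'my-percolators', // hypothetical index
    query: {
      percolate: {
        field: 'query',      // percolator-mapped field (assumption)
        document: { message: 'A new bonsai tree in the office' }
      }
    }
  })
  console.log(res.hits.hits.map(h => h._id))
}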
-export interface QueryDslPinnedDoc { - _id: Id - _index: IndexName -} - -export interface QueryDslPinnedQuery extends QueryDslQueryBase { - organic: QueryDslQueryContainer - ids?: Id[] - docs?: QueryDslPinnedDoc[] -} - -export interface QueryDslPrefixQuery extends QueryDslQueryBase { - rewrite?: MultiTermQueryRewrite - value: string - case_insensitive?: boolean -} - -export interface QueryDslQueryBase { - boost?: float - _name?: string -} - -export interface QueryDslQueryContainer { - bool?: QueryDslBoolQuery - boosting?: QueryDslBoostingQuery - common?: Partial> - combined_fields?: QueryDslCombinedFieldsQuery - constant_score?: QueryDslConstantScoreQuery - dis_max?: QueryDslDisMaxQuery - distance_feature?: QueryDslDistanceFeatureQuery - exists?: QueryDslExistsQuery - function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] - fuzzy?: Partial> - geo_bounding_box?: QueryDslGeoBoundingBoxQuery - geo_distance?: QueryDslGeoDistanceQuery - geo_grid?: Partial> - geo_polygon?: QueryDslGeoPolygonQuery - geo_shape?: QueryDslGeoShapeQuery - has_child?: QueryDslHasChildQuery - has_parent?: QueryDslHasParentQuery - ids?: QueryDslIdsQuery - intervals?: Partial> - knn?: KnnQuery - match?: Partial> - match_all?: QueryDslMatchAllQuery - match_bool_prefix?: Partial> - match_none?: QueryDslMatchNoneQuery - match_phrase?: Partial> - match_phrase_prefix?: Partial> - more_like_this?: QueryDslMoreLikeThisQuery - multi_match?: QueryDslMultiMatchQuery - nested?: QueryDslNestedQuery - parent_id?: QueryDslParentIdQuery - percolate?: QueryDslPercolateQuery - pinned?: QueryDslPinnedQuery - prefix?: Partial> - query_string?: QueryDslQueryStringQuery - range?: Partial> - rank_feature?: QueryDslRankFeatureQuery - regexp?: Partial> - rule?: QueryDslRuleQuery - script?: QueryDslScriptQuery - script_score?: QueryDslScriptScoreQuery - semantic?: QueryDslSemanticQuery - shape?: QueryDslShapeQuery - simple_query_string?: QueryDslSimpleQueryStringQuery - span_containing?: QueryDslSpanContainingQuery - span_field_masking?: QueryDslSpanFieldMaskingQuery - span_first?: QueryDslSpanFirstQuery - span_multi?: QueryDslSpanMultiTermQuery - span_near?: QueryDslSpanNearQuery - span_not?: QueryDslSpanNotQuery - span_or?: QueryDslSpanOrQuery - span_term?: Partial> - span_within?: QueryDslSpanWithinQuery - sparse_vector?: QueryDslSparseVectorQuery - term?: Partial> - terms?: QueryDslTermsQuery - terms_set?: Partial> - text_expansion?: Partial> - weighted_tokens?: Partial> - wildcard?: Partial> - wrapper?: QueryDslWrapperQuery - type?: QueryDslTypeQuery -} - -export interface QueryDslQueryStringQuery extends QueryDslQueryBase { - allow_leading_wildcard?: boolean - analyzer?: string - analyze_wildcard?: boolean - auto_generate_synonyms_phrase_query?: boolean - default_field?: Field - default_operator?: QueryDslOperator - enable_position_increments?: boolean - escape?: boolean - fields?: Field[] - fuzziness?: Fuzziness - fuzzy_max_expansions?: integer - fuzzy_prefix_length?: integer - fuzzy_rewrite?: MultiTermQueryRewrite - fuzzy_transpositions?: boolean - lenient?: boolean - max_determinized_states?: integer - minimum_should_match?: MinimumShouldMatch - phrase_slop?: double - query: string - quote_analyzer?: string - quote_field_suffix?: string - rewrite?: MultiTermQueryRewrite - tie_breaker?: double - time_zone?: TimeZone - type?: QueryDslTextQueryType -} - -export interface QueryDslRandomScoreFunction { - field?: Field - seed?: long | string -} - -export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | 
QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery
-
-export interface QueryDslRangeQueryBase<T = any> extends QueryDslQueryBase {
-  relation?: QueryDslRangeRelation
-  gt?: T
-  gte?: T
-  lt?: T
-  lte?: T
-  from?: T | null
-  to?: T | null
-}
-
-export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects'
-
-export interface QueryDslRankFeatureFunction {
-}
-
-export interface QueryDslRankFeatureFunctionLinear {
-}
-
-export interface QueryDslRankFeatureFunctionLogarithm {
-  scaling_factor: float
-}
-
-export interface QueryDslRankFeatureFunctionSaturation {
-  pivot?: float
-}
-
-export interface QueryDslRankFeatureFunctionSigmoid {
-  pivot: float
-  exponent: float
-}
-
-export interface QueryDslRankFeatureQuery extends QueryDslQueryBase {
-  field: Field
-  saturation?: QueryDslRankFeatureFunctionSaturation
-  log?: QueryDslRankFeatureFunctionLogarithm
-  linear?: QueryDslRankFeatureFunctionLinear
-  sigmoid?: QueryDslRankFeatureFunctionSigmoid
-}
-
-export interface QueryDslRegexpQuery extends QueryDslQueryBase {
-  case_insensitive?: boolean
-  flags?: string
-  max_determinized_states?: integer
-  rewrite?: MultiTermQueryRewrite
-  value: string
-}
-
-export interface QueryDslRuleQuery extends QueryDslQueryBase {
-  organic: QueryDslQueryContainer
-  ruleset_ids?: Id | Id[]
-  ruleset_id?: string
-  match_criteria: any
-}
-
-export interface QueryDslScriptQuery extends QueryDslQueryBase {
-  script: Script | string
-}
-
-export interface QueryDslScriptScoreFunction {
-  script: Script | string
-}
-
-export interface QueryDslScriptScoreQuery extends QueryDslQueryBase {
-  min_score?: float
-  query: QueryDslQueryContainer
-  script: Script | string
-}
-
-export interface QueryDslSemanticQuery extends QueryDslQueryBase {
-  field: string
-  query: string
-}
-
-export interface QueryDslShapeFieldQuery {
-  indexed_shape?: QueryDslFieldLookup
-  relation?: GeoShapeRelation
-  shape?: GeoShape
-}
-
-export interface QueryDslShapeQueryKeys extends QueryDslQueryBase {
-  ignore_unmapped?: boolean
-}
-export type QueryDslShapeQuery = QueryDslShapeQueryKeys
-& { [property: string]: QueryDslShapeFieldQuery | boolean | float | string }
-
-export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL'
-
-export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags<QueryDslSimpleQueryStringFlag>
-
-export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase {
-  analyzer?: string
-  analyze_wildcard?: boolean
-  auto_generate_synonyms_phrase_query?: boolean
-  default_operator?: QueryDslOperator
-  fields?: Field[]
-  flags?: QueryDslSimpleQueryStringFlags
-  fuzzy_max_expansions?: integer
-  fuzzy_prefix_length?: integer
-  fuzzy_transpositions?: boolean
-  lenient?: boolean
-  minimum_should_match?: MinimumShouldMatch
-  query: string
-  quote_field_suffix?: string
-}
-
-export interface QueryDslSpanContainingQuery extends QueryDslQueryBase {
-  big: QueryDslSpanQuery
-  little: QueryDslSpanQuery
-}
-
-export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase {
-  field: Field
-  query: QueryDslSpanQuery
-}
-
-export interface QueryDslSpanFirstQuery extends QueryDslQueryBase {
-  end: integer
-  match: QueryDslSpanQuery
-}
-
-export type QueryDslSpanGapQuery = Partial<Record<Field, integer>>
-
-export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase {
-  match: QueryDslQueryContainer
-}
-
-export interface QueryDslSpanNearQuery extends QueryDslQueryBase {
-  clauses: QueryDslSpanQuery[]
-  in_order?: boolean
-  slop?: integer
-}
-
-export interface QueryDslSpanNotQuery extends QueryDslQueryBase {
-  dist?: integer
-  exclude: QueryDslSpanQuery
-  include: QueryDslSpanQuery
-  post?: integer
-  pre?: integer
-}
-
-export interface QueryDslSpanOrQuery extends QueryDslQueryBase {
-  clauses: QueryDslSpanQuery[]
-}
-
-export interface QueryDslSpanQuery {
-  span_containing?: QueryDslSpanContainingQuery
-  span_field_masking?: QueryDslSpanFieldMaskingQuery
-  span_first?: QueryDslSpanFirstQuery
-  span_gap?: QueryDslSpanGapQuery
-  span_multi?: QueryDslSpanMultiTermQuery
-  span_near?: QueryDslSpanNearQuery
-  span_not?: QueryDslSpanNotQuery
-  span_or?: QueryDslSpanOrQuery
-  span_term?: Partial<Record<Field, QueryDslSpanTermQuery>>
-  span_within?: QueryDslSpanWithinQuery
-}
-
-export interface QueryDslSpanTermQuery extends QueryDslQueryBase {
-  value: string
-}
-
-export interface QueryDslSpanWithinQuery extends QueryDslQueryBase {
-  big: QueryDslSpanQuery
-  little: QueryDslSpanQuery
-}
-
-export interface QueryDslSparseVectorQuery extends QueryDslQueryBase {
-  field: Field
-  query_vector?: Record<string, float>
-  inference_id?: Id
-  query?: string
-  prune?: boolean
-  pruning_config?: TokenPruningConfig
-}
-
-export interface QueryDslTermQuery extends QueryDslQueryBase {
-  value: FieldValue
-  case_insensitive?: boolean
-}
-
-export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase<string> {
-}
-
-export interface QueryDslTermsLookup {
-  index: IndexName
-  id: Id
-  path: Field
-  routing?: Routing
-}
-
-export interface QueryDslTermsQueryKeys extends QueryDslQueryBase {
-}
-export type QueryDslTermsQuery = QueryDslTermsQueryKeys
-& { [property: string]: QueryDslTermsQueryField | float | string }
-
-export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup
-
-export interface QueryDslTermsSetQuery extends QueryDslQueryBase {
-  minimum_should_match?: MinimumShouldMatch
-  minimum_should_match_field?: Field
-  minimum_should_match_script?: Script | string
-  terms: string[]
-}
-
-export interface QueryDslTextExpansionQuery extends QueryDslQueryBase {
-  model_id: string
-  model_text: string
-  pruning_config?: TokenPruningConfig
-}
-
-export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix'
-
-export interface QueryDslTypeQuery extends QueryDslQueryBase {
-  value: string
-}
-
-export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase {
-}
-export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys
-& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode }
-
-export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase {
-}
-
-export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase {
-  format?: DateFormat
-  time_zone?: TimeZone
-}
-
-export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase {
-  tokens: Record<string, float>
-  pruning_config?: TokenPruningConfig
-}
-
-export interface QueryDslWildcardQuery extends QueryDslQueryBase {
-  case_insensitive?: boolean
-  rewrite?: MultiTermQueryRewrite
-  value?: string
-  wildcard?: string
-}
-
-export interface QueryDslWrapperQuery extends QueryDslQueryBase {
-  query: string
-}
-
-export type QueryDslZeroTermsQuery = 'all' | 'none'
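// Editor's sketch (not part of the diff): a minimal example of how the removed
// query DSL types compose through QueryDslQueryContainer. The index and field
// names ('price', 'tags', 'users', 'favorite_tags') are illustrative assumptions.
import * as T from '../types'

const query: T.QueryDslQueryContainer = {
  bool: {
    filter: [
      // numeric range clause, shaped by QueryDslRangeQueryBase
      { range: { price: { gte: 10, lte: 100 } } },
      // plain terms list: the FieldValue[] branch of QueryDslTermsQueryField
      { terms: { tags: ['sale', 'new'] } },
      // terms lookup: the QueryDslTermsLookup branch of QueryDslTermsQueryField
      { terms: { tags: { index: 'users', id: 'u1', path: 'favorite_tags' } } }
    ]
  }
}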
-
-export interface AsyncSearchAsyncSearch<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
-  aggregations?: TAggregations
-  _clusters?: ClusterStatistics
-  fields?: Record<string, any>
-  hits: SearchHitsMetadata<TDocument>
-  max_score?: double
-  num_reduce_phases?: long
-  profile?: SearchProfile
-  pit_id?: Id
-  _scroll_id?: ScrollId
-  _shards: ShardStatistics
-  suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>
-  terminated_early?: boolean
-  timed_out: boolean
-  took: long
-}
-
-export interface AsyncSearchAsyncSearchDocumentResponseBase<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> extends AsyncSearchAsyncSearchResponseBase {
-  response: AsyncSearchAsyncSearch<TDocument, TAggregations>
-}
-
-export interface AsyncSearchAsyncSearchResponseBase {
-  id?: Id
-  is_partial: boolean
-  is_running: boolean
-  expiration_time?: DateTime
-  expiration_time_in_millis: EpochTime<UnitMillis>
-  start_time?: DateTime
-  start_time_in_millis: EpochTime<UnitMillis>
-  completion_time?: DateTime
-  completion_time_in_millis?: EpochTime<UnitMillis>
-}
-
-export interface AsyncSearchDeleteRequest extends RequestBase {
-  id: Id
-}
-
-export type AsyncSearchDeleteResponse = AcknowledgedResponseBase
-
-export interface AsyncSearchGetRequest extends RequestBase {
-  id: Id
-  keep_alive?: Duration
-  typed_keys?: boolean
-  wait_for_completion_timeout?: Duration
-}
-
-export type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
-
-export interface AsyncSearchStatusRequest extends RequestBase {
-  id: Id
-  keep_alive?: Duration
-}
-
-export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase
-
-export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase {
-  _shards: ShardStatistics
-  _clusters?: ClusterStatistics
-  completion_status?: integer
-}
-
-export interface AsyncSearchSubmitRequest extends RequestBase {
-  index?: Indices
-  wait_for_completion_timeout?: Duration
-  keep_alive?: Duration
-  keep_on_completion?: boolean
-  allow_no_indices?: boolean
-  allow_partial_search_results?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  batched_reduce_size?: long
-  ccs_minimize_roundtrips?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  max_concurrent_shard_requests?: long
-  preference?: string
-  request_cache?: boolean
-  routing?: Routing
-  search_type?: SearchType
-  suggest_field?: Field
-  suggest_mode?: SuggestMode
-  suggest_size?: long
-  suggest_text?: string
-  typed_keys?: boolean
-  rest_total_hits_as_int?: boolean
-  _source_excludes?: Fields
-  _source_includes?: Fields
-  q?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record<string, AggregationsAggregationContainer>
-    /** @alias aggregations */
-    aggs?: Record<string, AggregationsAggregationContainer>
-    collapse?: SearchFieldCollapse
-    explain?: boolean
-    ext?: Record<string, any>
-    from?: integer
-    highlight?: SearchHighlight
-    track_total_hits?: SearchTrackHits
-    indices_boost?: Partial<Record<IndexName, double>>[]
-    docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
-    knn?: KnnSearch | KnnSearch[]
-    min_score?: double
-    post_filter?: QueryDslQueryContainer
-    profile?: boolean
-    query?: QueryDslQueryContainer
-    rescore?: SearchRescore | SearchRescore[]
-    script_fields?: Record<string, ScriptField>
-    search_after?: SortResults
-    size?: integer
-    slice?: SlicedScroll
-    sort?: Sort
-    _source?: SearchSourceConfig
-    fields?: (QueryDslFieldAndFormat | Field)[]
-    suggest?: SearchSuggester
-    terminate_after?: long
-    timeout?: string
-    track_scores?: boolean
-    version?: boolean
-    seq_no_primary_term?: boolean
-    stored_fields?: Fields
-    pit?: SearchPointInTimeReference
-    runtime_mappings?: MappingRuntimeFields
-    stats?: string[]
-  }
-}
-
-export type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations>
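// Editor's sketch (not part of the diff): how these async search types surface
// through the client, following the async_search module shown earlier in this
// diff. The index name 'my-index' and the Doc interface are illustrative.
import { Client } from '@elastic/elasticsearch'

interface Doc { title: string }

async function runAsyncSearch (client: Client): Promise<void> {
  // AsyncSearchSubmitRequest in, AsyncSearchSubmitResponse<Doc> out
  const submitted = await client.asyncSearch.submit<Doc>({
    index: 'my-index',
    keep_on_completion: true,
    wait_for_completion_timeout: '1s',
    query: { match_all: {} }
  })
  if (submitted.is_running && submitted.id != null) {
    // the get response reuses the same AsyncSearchAsyncSearchDocumentResponseBase envelope
    const done = await client.asyncSearch.get<Doc>({ id: submitted.id })
    console.log(done.response.hits.hits)
  }
}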
-
-export interface AutoscalingAutoscalingPolicy {
-  roles: string[]
-  deciders: Record<string, any>
-}
-
-export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity {
-  node: AutoscalingGetAutoscalingCapacityAutoscalingResources
-  total: AutoscalingGetAutoscalingCapacityAutoscalingResources
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider {
-  required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  reason_summary?: string
-  reason_details?: any
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders {
-  required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity
-  current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[]
-  deciders: Record<string, AutoscalingGetAutoscalingCapacityAutoscalingDecider>
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingNode {
-  name: NodeName
-}
-
-export interface AutoscalingGetAutoscalingCapacityAutoscalingResources {
-  storage: integer
-  memory: integer
-}
-
-export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export interface AutoscalingGetAutoscalingCapacityResponse {
-  policies: Record<string, AutoscalingGetAutoscalingCapacityAutoscalingDeciders>
-}
-
-export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy
-
-export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'policy' instead. */
-  body?: AutoscalingAutoscalingPolicy
-}
-
-export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase
-
-export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's'
-
-export type CatCatAnonalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[]
-
-export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state'
-
-export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[]
-
-export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v'
-
-export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[]
-
-export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string
-
-export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[]
-
-export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'tnode' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string
-
-export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[]
-
-export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
-}
-
-export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string
-
-export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[]
-
-export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'dsparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur' | string
-
-export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[]
-
-export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string
-
-export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[]
-
-export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string
-
-export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[]
-
-export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v'
-
-export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[]
-
-export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v'
-
-export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[]
-
-export interface CatAliasesAliasesRecord {
-  alias?: string
-  a?: string
-  index?: IndexName
-  i?: IndexName
-  idx?: IndexName
-  filter?: string
-  f?: string
-  fi?: string
-  'routing.index'?: string
-  ri?: string
-  routingIndex?: string
-  'routing.search'?: string
-  rs?: string
-  routingSearch?: string
-  is_write_index?: string
-  w?: string
-  isWriteIndex?: string
-}
-
-export interface CatAliasesRequest extends CatCatRequestBase {
-  name?: Names
-  h?: Names
-  s?: Names
-  expand_wildcards?: ExpandWildcards
-  local?: boolean
-}
-
-export type CatAliasesResponse = CatAliasesAliasesRecord[]
-
-export interface CatAllocationAllocationRecord {
-  shards?: string
-  s?: string
-  'shards.undesired'?: string | null
-  'write_load.forecast'?: SpecUtilsStringified<double> | null
-  wlf?: SpecUtilsStringified<double> | null
-  writeLoadForecast?: SpecUtilsStringified<double> | null
-  'disk.indices.forecast'?: ByteSize | null
-  dif?: ByteSize | null
-  diskIndicesForecast?: ByteSize | null
-  'disk.indices'?: ByteSize | null
-  di?: ByteSize | null
-  diskIndices?: ByteSize | null
-  'disk.used'?: ByteSize | null
-  du?: ByteSize | null
-  diskUsed?: ByteSize | null
-  'disk.avail'?: ByteSize | null
-  da?: ByteSize | null
-  diskAvail?: ByteSize | null
-  'disk.total'?: ByteSize | null
-  dt?: ByteSize | null
-  diskTotal?: ByteSize | null
-  'disk.percent'?: Percentage | null
-  dp?: Percentage | null
-  diskPercent?: Percentage | null
-  host?: Host | null
-  h?: Host | null
-  ip?: Ip | null
-  node?: string
-  n?: string
-  'node.role'?: string | null
-  r?: string | null
-  role?: string | null
-  nodeRole?: string | null
-}
-
-export interface CatAllocationRequest extends CatCatRequestBase {
-  node_id?: NodeIds
-  bytes?: Bytes
-  h?: Names
-  s?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatAllocationResponse = CatAllocationAllocationRecord[]
-
-export interface CatComponentTemplatesComponentTemplate {
-  name: string
-  version: string | null
-  alias_count: string
-  mapping_count: string
-  settings_count: string
-  metadata_count: string
-  included_in: string
-}
-
-export interface CatComponentTemplatesRequest extends CatCatRequestBase {
-  name?: string
-  h?: Names
-  s?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[]
-
-export interface CatCountCountRecord {
-  epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  t?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  time?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  timestamp?: TimeOfDay
-  ts?: TimeOfDay
-  hms?: TimeOfDay
-  hhmmss?: TimeOfDay
-  count?: string
-  dc?: string
-  'docs.count'?: string
-  docsCount?: string
-}
-
-export interface CatCountRequest extends CatCatRequestBase {
-  index?: Indices
-  h?: Names
-  s?: Names
-}
-
-export type CatCountResponse = CatCountCountRecord[]
-
-export interface CatFielddataFielddataRecord {
-  id?: string
-  host?: string
-  h?: string
-  ip?: string
-  node?: string
-  n?: string
-  field?: string
-  f?: string
-  size?: string
-}
-
-export interface CatFielddataRequest extends CatCatRequestBase {
-  fields?: Fields
-  bytes?: Bytes
-  h?: Names
-  s?: Names
-}
-
-export type CatFielddataResponse = CatFielddataFielddataRecord[]
-
-export interface CatHealthHealthRecord {
-  epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  time?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  timestamp?: TimeOfDay
-  ts?: TimeOfDay
-  hms?: TimeOfDay
-  hhmmss?: TimeOfDay
-  cluster?: string
-  cl?: string
-  status?: string
-  st?: string
-  'node.total'?: string
-  nt?: string
-  nodeTotal?: string
-  'node.data'?: string
-  nd?: string
-  nodeData?: string
-  shards?: string
-  t?: string
-  sh?: string
-  'shards.total'?: string
-  shardsTotal?: string
-  pri?: string
-  p?: string
-  'shards.primary'?: string
-  shardsPrimary?: string
-  relo?: string
-  r?: string
-  'shards.relocating'?: string
-  shardsRelocating?: string
-  init?: string
-  i?: string
-  'shards.initializing'?: string
-  shardsInitializing?: string
-  'unassign.pri'?: string
-  up?: string
-  'shards.unassigned.primary'?: string
-  shardsUnassignedPrimary?: string
-  unassign?: string
-  u?: string
-  'shards.unassigned'?: string
-  shardsUnassigned?: string
-  pending_tasks?: string
-  pt?: string
-  pendingTasks?: string
-  max_task_wait_time?: string
-  mtwt?: string
-  maxTaskWaitTime?: string
-  active_shards_percent?: string
-  asp?: string
-  activeShardsPercent?: string
-}
-
-export interface CatHealthRequest extends CatCatRequestBase {
-  time?: TimeUnit
-  ts?: boolean
-  h?: Names
-  s?: Names
-}
-
-export type CatHealthResponse = CatHealthHealthRecord[]
-
-export interface CatHelpRequest {
-}
-
-export interface CatHelpResponse {
-}
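// Editor's sketch (not part of the diff): the Cat*Column / Cat*Columns aliases
// above exist to type the `h` (columns) and `s` (sort) parameters of the cat
// APIs, e.g. CatShardsRequest['h'] is CatCatShardColumns. A minimal example:
import { Client } from '@elastic/elasticsearch'

async function listShards (client: Client): Promise<void> {
  const rows = await client.cat.shards({
    format: 'json',                                   // from the common cat query parameters
    h: ['index', 'shard', 'prirep', 'state', 'docs'], // each value checked against CatCatShardColumn
    s: ['index']
  })
  console.log(rows)
}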
-
-export interface CatIndicesIndicesRecord {
-  health?: string
-  h?: string
-  status?: string
-  s?: string
-  index?: string
-  i?: string
-  idx?: string
-  uuid?: string
-  id?: string
-  pri?: string
-  p?: string
-  'shards.primary'?: string
-  shardsPrimary?: string
-  rep?: string
-  r?: string
-  'shards.replica'?: string
-  shardsReplica?: string
-  'docs.count'?: string | null
-  dc?: string | null
-  docsCount?: string | null
-  'docs.deleted'?: string | null
-  dd?: string | null
-  docsDeleted?: string | null
-  'creation.date'?: string
-  cd?: string
-  'creation.date.string'?: string
-  cds?: string
-  'store.size'?: string | null
-  ss?: string | null
-  storeSize?: string | null
-  'pri.store.size'?: string | null
-  'dataset.size'?: string | null
-  'completion.size'?: string
-  cs?: string
-  completionSize?: string
-  'pri.completion.size'?: string
-  'fielddata.memory_size'?: string
-  fm?: string
-  fielddataMemory?: string
-  'pri.fielddata.memory_size'?: string
-  'fielddata.evictions'?: string
-  fe?: string
-  fielddataEvictions?: string
-  'pri.fielddata.evictions'?: string
-  'query_cache.memory_size'?: string
-  qcm?: string
-  queryCacheMemory?: string
-  'pri.query_cache.memory_size'?: string
-  'query_cache.evictions'?: string
-  qce?: string
-  queryCacheEvictions?: string
-  'pri.query_cache.evictions'?: string
-  'request_cache.memory_size'?: string
-  rcm?: string
-  requestCacheMemory?: string
-  'pri.request_cache.memory_size'?: string
-  'request_cache.evictions'?: string
-  rce?: string
-  requestCacheEvictions?: string
-  'pri.request_cache.evictions'?: string
-  'request_cache.hit_count'?: string
-  rchc?: string
-  requestCacheHitCount?: string
-  'pri.request_cache.hit_count'?: string
-  'request_cache.miss_count'?: string
-  rcmc?: string
-  requestCacheMissCount?: string
-  'pri.request_cache.miss_count'?: string
-  'flush.total'?: string
-  ft?: string
-  flushTotal?: string
-  'pri.flush.total'?: string
-  'flush.total_time'?: string
-  ftt?: string
-  flushTotalTime?: string
-  'pri.flush.total_time'?: string
-  'get.current'?: string
-  gc?: string
-  getCurrent?: string
-  'pri.get.current'?: string
-  'get.time'?: string
-  gti?: string
-  getTime?: string
-  'pri.get.time'?: string
-  'get.total'?: string
-  gto?: string
-  getTotal?: string
-  'pri.get.total'?: string
-  'get.exists_time'?: string
-  geti?: string
-  getExistsTime?: string
-  'pri.get.exists_time'?: string
-  'get.exists_total'?: string
-  geto?: string
-  getExistsTotal?: string
-  'pri.get.exists_total'?: string
-  'get.missing_time'?: string
-  gmti?: string
-  getMissingTime?: string
-  'pri.get.missing_time'?: string
-  'get.missing_total'?: string
-  gmto?: string
-  getMissingTotal?: string
-  'pri.get.missing_total'?: string
-  'indexing.delete_current'?: string
-  idc?: string
-  indexingDeleteCurrent?: string
-  'pri.indexing.delete_current'?: string
-  'indexing.delete_time'?: string
-  idti?: string
-  indexingDeleteTime?: string
-  'pri.indexing.delete_time'?: string
-  'indexing.delete_total'?: string
-  idto?: string
-  indexingDeleteTotal?: string
-  'pri.indexing.delete_total'?: string
-  'indexing.index_current'?: string
-  iic?: string
-  indexingIndexCurrent?: string
-  'pri.indexing.index_current'?: string
-  'indexing.index_time'?: string
-  iiti?: string
-  indexingIndexTime?: string
-  'pri.indexing.index_time'?: string
-  'indexing.index_total'?: string
-  iito?: string
-  indexingIndexTotal?: string
-  'pri.indexing.index_total'?: string
-  'indexing.index_failed'?: string
-  iif?: string
-  indexingIndexFailed?: string
-  'pri.indexing.index_failed'?: string
-  'merges.current'?: string
-  mc?: string
-  mergesCurrent?: string
-  'pri.merges.current'?: string
-  'merges.current_docs'?: string
-  mcd?: string
-  mergesCurrentDocs?: string
-  'pri.merges.current_docs'?: string
-  'merges.current_size'?: string
-  mcs?: string
-  mergesCurrentSize?: string
-  'pri.merges.current_size'?: string
-  'merges.total'?: string
-  mt?: string
-  mergesTotal?: string
-  'pri.merges.total'?: string
-  'merges.total_docs'?: string
-  mtd?: string
-  mergesTotalDocs?: string
-  'pri.merges.total_docs'?: string
-  'merges.total_size'?: string
-  mts?: string
-  mergesTotalSize?: string
-  'pri.merges.total_size'?: string
-  'merges.total_time'?: string
-  mtt?: string
-  mergesTotalTime?: string
-  'pri.merges.total_time'?: string
-  'refresh.total'?: string
-  rto?: string
-  refreshTotal?: string
-  'pri.refresh.total'?: string
-  'refresh.time'?: string
-  rti?: string
-  refreshTime?: string
-  'pri.refresh.time'?: string
-  'refresh.external_total'?: string
-  reto?: string
-  'pri.refresh.external_total'?: string
-  'refresh.external_time'?: string
-  reti?: string
-  'pri.refresh.external_time'?: string
-  'refresh.listeners'?: string
-  rli?: string
-  refreshListeners?: string
-  'pri.refresh.listeners'?: string
-  'search.fetch_current'?: string
-  sfc?: string
-  searchFetchCurrent?: string
-  'pri.search.fetch_current'?: string
-  'search.fetch_time'?: string
-  sfti?: string
-  searchFetchTime?: string
-  'pri.search.fetch_time'?: string
-  'search.fetch_total'?: string
-  sfto?: string
-  searchFetchTotal?: string
-  'pri.search.fetch_total'?: string
-  'search.open_contexts'?: string
-  so?: string
-  searchOpenContexts?: string
-  'pri.search.open_contexts'?: string
-  'search.query_current'?: string
-  sqc?: string
-  searchQueryCurrent?: string
-  'pri.search.query_current'?: string
-  'search.query_time'?: string
-  sqti?: string
-  searchQueryTime?: string
-  'pri.search.query_time'?: string
-  'search.query_total'?: string
-  sqto?: string
-  searchQueryTotal?: string
-  'pri.search.query_total'?: string
-  'search.scroll_current'?: string
-  scc?: string
-  searchScrollCurrent?: string
-  'pri.search.scroll_current'?: string
-  'search.scroll_time'?: string
-  scti?: string
-  searchScrollTime?: string
-  'pri.search.scroll_time'?: string
-  'search.scroll_total'?: string
-  scto?: string
-  searchScrollTotal?: string
-  'pri.search.scroll_total'?: string
-  'segments.count'?: string
-  sc?: string
-  segmentsCount?: string
-  'pri.segments.count'?: string
-  'segments.memory'?: string
-  sm?: string
-  segmentsMemory?: string
-  'pri.segments.memory'?: string
-  'segments.index_writer_memory'?: string
-  siwm?: string
-  segmentsIndexWriterMemory?: string
-  'pri.segments.index_writer_memory'?: string
-  'segments.version_map_memory'?: string
-  svmm?: string
-  segmentsVersionMapMemory?: string
-  'pri.segments.version_map_memory'?: string
-  'segments.fixed_bitset_memory'?: string
-  sfbm?: string
-  fixedBitsetMemory?: string
-  'pri.segments.fixed_bitset_memory'?: string
-  'warmer.current'?: string
-  wc?: string
-  warmerCurrent?: string
-  'pri.warmer.current'?: string
-  'warmer.total'?: string
-  wto?: string
-  warmerTotal?: string
-  'pri.warmer.total'?: string
-  'warmer.total_time'?: string
-  wtt?: string
-  warmerTotalTime?: string
-  'pri.warmer.total_time'?: string
-  'suggest.current'?: string
-  suc?: string
-  suggestCurrent?: string
-  'pri.suggest.current'?: string
-  'suggest.time'?: string
-  suti?: string
-  suggestTime?: string
-  'pri.suggest.time'?: string
-  'suggest.total'?: string
-  suto?: string
-  suggestTotal?: string
-  'pri.suggest.total'?: string
-  'memory.total'?: string
-  tm?: string
-  memoryTotal?: string
-  'pri.memory.total'?: string
-  'search.throttled'?: string
-  sth?: string
-  'bulk.total_operations'?: string
-  bto?: string
-  bulkTotalOperation?: string
-  'pri.bulk.total_operations'?: string
-  'bulk.total_time'?: string
-  btti?: string
-  bulkTotalTime?: string
-  'pri.bulk.total_time'?: string
-  'bulk.total_size_in_bytes'?: string
-  btsi?: string
-  bulkTotalSizeInBytes?: string
-  'pri.bulk.total_size_in_bytes'?: string
-  'bulk.avg_time'?: string
-  bati?: string
-  bulkAvgTime?: string
-  'pri.bulk.avg_time'?: string
-  'bulk.avg_size_in_bytes'?: string
-  basi?: string
-  bulkAvgSizeInBytes?: string
-  'pri.bulk.avg_size_in_bytes'?: string
-}
-
-export interface CatIndicesRequest extends CatCatRequestBase {
-  index?: Indices
-  bytes?: Bytes
-  expand_wildcards?: ExpandWildcards
-  health?: HealthStatus
-  include_unloaded_segments?: boolean
-  pri?: boolean
-  time?: TimeUnit
-  master_timeout?: Duration
-  h?: Names
-  s?: Names
-}
-
-export type CatIndicesResponse = CatIndicesIndicesRecord[]
-
-export interface CatMasterMasterRecord {
-  id?: string
-  host?: string
-  h?: string
-  ip?: string
-  node?: string
-  n?: string
-}
-
-export interface CatMasterRequest extends CatCatRequestBase {
-  h?: Names
-  s?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatMasterResponse = CatMasterMasterRecord[]
-
-export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord {
-  id?: Id
-  type?: string
-  t?: string
-  create_time?: string
-  ct?: string
-  createTime?: string
-  version?: VersionString
-  v?: VersionString
-  source_index?: IndexName
-  si?: IndexName
-  sourceIndex?: IndexName
-  dest_index?: IndexName
-  di?: IndexName
-  destIndex?: IndexName
-  description?: string
-  d?: string
-  model_memory_limit?: string
-  mml?: string
-  modelMemoryLimit?: string
-  state?: string
-  s?: string
-  failure_reason?: string
-  fr?: string
-  failureReason?: string
-  progress?: string
-  p?: string
-  assignment_explanation?: string
-  ae?: string
-  assignmentExplanation?: string
-  'node.id'?: Id
-  ni?: Id
-  nodeId?: Id
-  'node.name'?: Name
-  nn?: Name
-  nodeName?: Name
-  'node.ephemeral_id'?: Id
-  ne?: Id
-  nodeEphemeralId?: Id
-  'node.address'?: string
-  na?: string
-  nodeAddress?: string
-}
-
-export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase {
-  id?: Id
-  allow_no_match?: boolean
-  bytes?: Bytes
-  h?: CatCatDfaColumns
-  s?: CatCatDfaColumns
-  time?: TimeUnit
-}
-
-export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[]
-
-export interface CatMlDatafeedsDatafeedsRecord {
-  id?: string
-  state?: MlDatafeedState
-  s?: MlDatafeedState
-  assignment_explanation?: string
-  ae?: string
-  'buckets.count'?: string
-  bc?: string
-  bucketsCount?: string
-  'search.count'?: string
-  sc?: string
-  searchCount?: string
-  'search.time'?: string
-  st?: string
-  searchTime?: string
-  'search.bucket_avg'?: string
-  sba?: string
-  searchBucketAvg?: string
-  'search.exp_avg_hour'?: string
-  seah?: string
-  searchExpAvgHour?: string
-  'node.id'?: string
-  ni?: string
-  nodeId?: string
-  'node.name'?: string
-  nn?: string
-  nodeName?: string
-  'node.ephemeral_id'?: string
-  ne?: string
-  nodeEphemeralId?: string
-  'node.address'?: string
-  na?: string
-  nodeAddress?: string
-}
-
-export interface CatMlDatafeedsRequest extends CatCatRequestBase {
-  datafeed_id?: Id
-  allow_no_match?: boolean
-  h?: CatCatDatafeedColumns
-  s?: CatCatDatafeedColumns
-  time?: TimeUnit
-}
-
-export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[]
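// Editor's sketch (not part of the diff): every Cat*Record above exposes each
// column under its full name plus its short cat-style aliases, so the same
// value can be read through either key. An illustrative read of datafeed rows:
import { Client } from '@elastic/elasticsearch'

async function datafeedStates (client: Client): Promise<void> {
  const rows = await client.cat.mlDatafeeds({ format: 'json' })
  for (const row of rows) {
    // 's' is the short alias for 'state' in CatMlDatafeedsDatafeedsRecord
    console.log(row.id, row.state ?? row.s)
  }
}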
-
-export interface CatMlJobsJobsRecord {
-  id?: Id
-  state?: MlJobState
-  s?: MlJobState
-  opened_time?: string
-  ot?: string
-  assignment_explanation?: string
-  ae?: string
-  'data.processed_records'?: string
-  dpr?: string
-  dataProcessedRecords?: string
-  'data.processed_fields'?: string
-  dpf?: string
-  dataProcessedFields?: string
-  'data.input_bytes'?: ByteSize
-  dib?: ByteSize
-  dataInputBytes?: ByteSize
-  'data.input_records'?: string
-  dir?: string
-  dataInputRecords?: string
-  'data.input_fields'?: string
-  dif?: string
-  dataInputFields?: string
-  'data.invalid_dates'?: string
-  did?: string
-  dataInvalidDates?: string
-  'data.missing_fields'?: string
-  dmf?: string
-  dataMissingFields?: string
-  'data.out_of_order_timestamps'?: string
-  doot?: string
-  dataOutOfOrderTimestamps?: string
-  'data.empty_buckets'?: string
-  deb?: string
-  dataEmptyBuckets?: string
-  'data.sparse_buckets'?: string
-  dsb?: string
-  dataSparseBuckets?: string
-  'data.buckets'?: string
-  db?: string
-  dataBuckets?: string
-  'data.earliest_record'?: string
-  der?: string
-  dataEarliestRecord?: string
-  'data.latest_record'?: string
-  dlr?: string
-  dataLatestRecord?: string
-  'data.last'?: string
-  dl?: string
-  dataLast?: string
-  'data.last_empty_bucket'?: string
-  dleb?: string
-  dataLastEmptyBucket?: string
-  'data.last_sparse_bucket'?: string
-  dlsb?: string
-  dataLastSparseBucket?: string
-  'model.bytes'?: ByteSize
-  mb?: ByteSize
-  modelBytes?: ByteSize
-  'model.memory_status'?: MlMemoryStatus
-  mms?: MlMemoryStatus
-  modelMemoryStatus?: MlMemoryStatus
-  'model.bytes_exceeded'?: ByteSize
-  mbe?: ByteSize
-  modelBytesExceeded?: ByteSize
-  'model.memory_limit'?: string
-  mml?: string
-  modelMemoryLimit?: string
-  'model.by_fields'?: string
-  mbf?: string
-  modelByFields?: string
-  'model.over_fields'?: string
-  mof?: string
-  modelOverFields?: string
-  'model.partition_fields'?: string
-  mpf?: string
-  modelPartitionFields?: string
-  'model.bucket_allocation_failures'?: string
-  mbaf?: string
-  modelBucketAllocationFailures?: string
-  'model.categorization_status'?: MlCategorizationStatus
-  mcs?: MlCategorizationStatus
-  modelCategorizationStatus?: MlCategorizationStatus
-  'model.categorized_doc_count'?: string
-  mcdc?: string
-  modelCategorizedDocCount?: string
-  'model.total_category_count'?: string
-  mtcc?: string
-  modelTotalCategoryCount?: string
-  'model.frequent_category_count'?: string
-  modelFrequentCategoryCount?: string
-  'model.rare_category_count'?: string
-  mrcc?: string
-  modelRareCategoryCount?: string
-  'model.dead_category_count'?: string
-  mdcc?: string
-  modelDeadCategoryCount?: string
-  'model.failed_category_count'?: string
-  mfcc?: string
-  modelFailedCategoryCount?: string
-  'model.log_time'?: string
-  mlt?: string
-  modelLogTime?: string
-  'model.timestamp'?: string
-  mt?: string
-  modelTimestamp?: string
-  'forecasts.total'?: string
-  ft?: string
-  forecastsTotal?: string
-  'forecasts.memory.min'?: string
-  fmmin?: string
-  forecastsMemoryMin?: string
-  'forecasts.memory.max'?: string
-  fmmax?: string
-  forecastsMemoryMax?: string
-  'forecasts.memory.avg'?: string
-  fmavg?: string
-  forecastsMemoryAvg?: string
-  'forecasts.memory.total'?: string
-  fmt?: string
-  forecastsMemoryTotal?: string
-  'forecasts.records.min'?: string
-  frmin?: string
-  forecastsRecordsMin?: string
-  'forecasts.records.max'?: string
-  frmax?: string
-  forecastsRecordsMax?: string
-  'forecasts.records.avg'?: string
-  fravg?: string
-  forecastsRecordsAvg?: string
-  'forecasts.records.total'?: string
-  frt?: string
-  forecastsRecordsTotal?: string
-  'forecasts.time.min'?: string
-  ftmin?: string
-  forecastsTimeMin?: string
-  'forecasts.time.max'?: string
-  ftmax?: string
-  forecastsTimeMax?: string
-  'forecasts.time.avg'?: string
-  ftavg?: string
-  forecastsTimeAvg?: string
-  'forecasts.time.total'?: string
-  ftt?: string
-  forecastsTimeTotal?: string
-  'node.id'?: NodeId
-  ni?: NodeId
-  nodeId?: NodeId
-  'node.name'?: string
-  nn?: string
-  nodeName?: string
-  'node.ephemeral_id'?: NodeId
-  ne?: NodeId
-  nodeEphemeralId?: NodeId
-  'node.address'?: string
-  na?: string
-  nodeAddress?: string
-  'buckets.count'?: string
-  bc?: string
-  bucketsCount?: string
-  'buckets.time.total'?: string
-  btt?: string
-  bucketsTimeTotal?: string
-  'buckets.time.min'?: string
-  btmin?: string
-  bucketsTimeMin?: string
-  'buckets.time.max'?: string
-  btmax?: string
-  bucketsTimeMax?: string
-  'buckets.time.exp_avg'?: string
-  btea?: string
-  bucketsTimeExpAvg?: string
-  'buckets.time.exp_avg_hour'?: string
-  bteah?: string
-  bucketsTimeExpAvgHour?: string
-}
-
-export interface CatMlJobsRequest extends CatCatRequestBase {
-  job_id?: Id
-  allow_no_match?: boolean
-  bytes?: Bytes
-  h?: CatCatAnonalyDetectorColumns
-  s?: CatCatAnonalyDetectorColumns
-  time?: TimeUnit
-}
-
-export type CatMlJobsResponse = CatMlJobsJobsRecord[]
-
-export interface CatMlTrainedModelsRequest extends CatCatRequestBase {
-  model_id?: Id
-  allow_no_match?: boolean
-  bytes?: Bytes
-  h?: CatCatTrainedModelsColumns
-  s?: CatCatTrainedModelsColumns
-  from?: integer
-  size?: integer
-  time?: TimeUnit
-}
-
-export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[]
-
-export interface CatMlTrainedModelsTrainedModelsRecord {
-  id?: Id
-  created_by?: string
-  c?: string
-  createdBy?: string
-  heap_size?: ByteSize
-  hs?: ByteSize
-  modelHeapSize?: ByteSize
-  operations?: string
-  o?: string
-  modelOperations?: string
-  license?: string
-  l?: string
-  create_time?: DateTime
-  ct?: DateTime
-  version?: VersionString
-  v?: VersionString
-  description?: string
-  d?: string
-  'ingest.pipelines'?: string
-  ip?: string
-  ingestPipelines?: string
-  'ingest.count'?: string
-  ic?: string
-  ingestCount?: string
-  'ingest.time'?: string
-  it?: string
-  ingestTime?: string
-  'ingest.current'?: string
-  icurr?: string
-  ingestCurrent?: string
-  'ingest.failed'?: string
-  if?: string
-  ingestFailed?: string
-  'data_frame.id'?: string
-  dfid?: string
-  dataFrameAnalytics?: string
-  'data_frame.create_time'?: string
-  dft?: string
-  dataFrameAnalyticsTime?: string
-  'data_frame.source_index'?: string
-  dfsi?: string
-  dataFrameAnalyticsSrcIndex?: string
-  'data_frame.analysis'?: string
-  dfa?: string
-  dataFrameAnalyticsAnalysis?: string
-  type?: string
-}
-
-export interface CatNodeattrsNodeAttributesRecord {
-  node?: string
-  id?: string
-  pid?: string
-  host?: string
-  h?: string
-  ip?: string
-  i?: string
-  port?: string
-  attr?: string
-  value?: string
-}
-
-export interface CatNodeattrsRequest extends CatCatRequestBase {
-  h?: Names
-  s?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[]
string - 'script.compilation_limit_triggered'?: string - scrclt?: string - scriptCacheCompilationLimitTriggered?: string - 'search.fetch_current'?: string - sfc?: string - searchFetchCurrent?: string - 'search.fetch_time'?: string - sfti?: string - searchFetchTime?: string - 'search.fetch_total'?: string - sfto?: string - searchFetchTotal?: string - 'search.open_contexts'?: string - so?: string - searchOpenContexts?: string - 'search.query_current'?: string - sqc?: string - searchQueryCurrent?: string - 'search.query_time'?: string - sqti?: string - searchQueryTime?: string - 'search.query_total'?: string - sqto?: string - searchQueryTotal?: string - 'search.scroll_current'?: string - scc?: string - searchScrollCurrent?: string - 'search.scroll_time'?: string - scti?: string - searchScrollTime?: string - 'search.scroll_total'?: string - scto?: string - searchScrollTotal?: string - 'segments.count'?: string - sc?: string - segmentsCount?: string - 'segments.memory'?: string - sm?: string - segmentsMemory?: string - 'segments.index_writer_memory'?: string - siwm?: string - segmentsIndexWriterMemory?: string - 'segments.version_map_memory'?: string - svmm?: string - segmentsVersionMapMemory?: string - 'segments.fixed_bitset_memory'?: string - sfbm?: string - fixedBitsetMemory?: string - 'suggest.current'?: string - suc?: string - suggestCurrent?: string - 'suggest.time'?: string - suti?: string - suggestTime?: string - 'suggest.total'?: string - suto?: string - suggestTotal?: string - 'bulk.total_operations'?: string - bto?: string - bulkTotalOperations?: string - 'bulk.total_time'?: string - btti?: string - bulkTotalTime?: string - 'bulk.total_size_in_bytes'?: string - btsi?: string - bulkTotalSizeInBytes?: string - 'bulk.avg_time'?: string - bati?: string - bulkAvgTime?: string - 'bulk.avg_size_in_bytes'?: string - basi?: string - bulkAvgSizeInBytes?: string -} - -export interface CatNodesRequest extends CatCatRequestBase { - bytes?: Bytes - full_id?: boolean | string - include_unloaded_segments?: boolean - h?: CatCatNodeColumns - s?: Names - master_timeout?: Duration - time?: TimeUnit -} - -export type CatNodesResponse = CatNodesNodesRecord[] - -export interface CatPendingTasksPendingTasksRecord { - insertOrder?: string - o?: string - timeInQueue?: string - t?: string - priority?: string - p?: string - source?: string - s?: string -} - -export interface CatPendingTasksRequest extends CatCatRequestBase { - h?: Names - s?: Names - local?: boolean - master_timeout?: Duration - time?: TimeUnit -} - -export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] - -export interface CatPluginsPluginsRecord { - id?: NodeId - name?: Name - n?: Name - component?: string - c?: string - version?: VersionString - v?: VersionString - description?: string - d?: string - type?: string - t?: string -} - -export interface CatPluginsRequest extends CatCatRequestBase { - h?: Names - s?: Names - include_bootstrap?: boolean - local?: boolean - master_timeout?: Duration -} - -export type CatPluginsResponse = CatPluginsPluginsRecord[] - -export interface CatRecoveryRecoveryRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - start_time?: DateTime - start?: DateTime - start_time_millis?: EpochTime - start_millis?: EpochTime - stop_time?: DateTime - stop?: DateTime - stop_time_millis?: EpochTime - stop_millis?: EpochTime - time?: Duration - t?: Duration - ti?: Duration - type?: string - ty?: string - stage?: string - st?: string - source_host?: 
string - shost?: string - source_node?: string - snode?: string - target_host?: string - thost?: string - target_node?: string - tnode?: string - repository?: string - rep?: string - snapshot?: string - snap?: string - files?: string - f?: string - files_recovered?: string - fr?: string - files_percent?: Percentage - fp?: Percentage - files_total?: string - tf?: string - bytes?: string - b?: string - bytes_recovered?: string - br?: string - bytes_percent?: Percentage - bp?: Percentage - bytes_total?: string - tb?: string - translog_ops?: string - to?: string - translog_ops_recovered?: string - tor?: string - translog_ops_percent?: Percentage - top?: Percentage -} - -export interface CatRecoveryRequest extends CatCatRequestBase { - index?: Indices - active_only?: boolean - bytes?: Bytes - detailed?: boolean - h?: CatCatRecoveryColumns - s?: Names - time?: TimeUnit -} - -export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] - -export interface CatRepositoriesRepositoriesRecord { - id?: string - repoId?: string - type?: string - t?: string -} - -export interface CatRepositoriesRequest extends CatCatRequestBase { - h?: Names - s?: Names - local?: boolean - master_timeout?: Duration -} - -export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] - -export interface CatSegmentsRequest extends CatCatRequestBase { - index?: Indices - bytes?: Bytes - h?: CatCatSegmentsColumns - s?: Names - local?: boolean - master_timeout?: Duration -} - -export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] - -export interface CatSegmentsSegmentsRecord { - index?: IndexName - i?: IndexName - idx?: IndexName - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - ip?: string - id?: NodeId - segment?: string - seg?: string - generation?: string - g?: string - gen?: string - 'docs.count'?: string - dc?: string - docsCount?: string - 'docs.deleted'?: string - dd?: string - docsDeleted?: string - size?: ByteSize - si?: ByteSize - 'size.memory'?: ByteSize - sm?: ByteSize - sizeMemory?: ByteSize - committed?: string - ic?: string - isCommitted?: string - searchable?: string - is?: string - isSearchable?: string - version?: VersionString - v?: VersionString - compound?: string - ico?: string - isCompound?: string -} - -export interface CatShardsRequest extends CatCatRequestBase { - index?: Indices - bytes?: Bytes - h?: CatCatShardColumns - s?: Names - master_timeout?: Duration - time?: TimeUnit -} - -export type CatShardsResponse = CatShardsShardsRecord[] - -export interface CatShardsShardsRecord { - index?: string - i?: string - idx?: string - shard?: string - s?: string - sh?: string - prirep?: string - p?: string - pr?: string - primaryOrReplica?: string - state?: string - st?: string - docs?: string | null - d?: string | null - dc?: string | null - store?: string | null - sto?: string | null - dataset?: string | null - ip?: string | null - id?: string - node?: string | null - n?: string | null - sync_id?: string - 'unassigned.reason'?: string - ur?: string - 'unassigned.at'?: string - ua?: string - 'unassigned.for'?: string - uf?: string - 'unassigned.details'?: string - ud?: string - 'recoverysource.type'?: string - rs?: string - 'completion.size'?: string - cs?: string - completionSize?: string - 'fielddata.memory_size'?: string - fm?: string - fielddataMemory?: string - 'fielddata.evictions'?: string - fe?: string - fielddataEvictions?: string - 'query_cache.memory_size'?: string - qcm?: string - queryCacheMemory?: string 
-
-export interface CatShardsShardsRecord {
-  index?: string
-  i?: string
-  idx?: string
-  shard?: string
-  s?: string
-  sh?: string
-  prirep?: string
-  p?: string
-  pr?: string
-  primaryOrReplica?: string
-  state?: string
-  st?: string
-  docs?: string | null
-  d?: string | null
-  dc?: string | null
-  store?: string | null
-  sto?: string | null
-  dataset?: string | null
-  ip?: string | null
-  id?: string
-  node?: string | null
-  n?: string | null
-  sync_id?: string
-  'unassigned.reason'?: string
-  ur?: string
-  'unassigned.at'?: string
-  ua?: string
-  'unassigned.for'?: string
-  uf?: string
-  'unassigned.details'?: string
-  ud?: string
-  'recoverysource.type'?: string
-  rs?: string
-  'completion.size'?: string
-  cs?: string
-  completionSize?: string
-  'fielddata.memory_size'?: string
-  fm?: string
-  fielddataMemory?: string
-  'fielddata.evictions'?: string
-  fe?: string
-  fielddataEvictions?: string
-  'query_cache.memory_size'?: string
-  qcm?: string
-  queryCacheMemory?: string
-  'query_cache.evictions'?: string
-  qce?: string
-  queryCacheEvictions?: string
-  'flush.total'?: string
-  ft?: string
-  flushTotal?: string
-  'flush.total_time'?: string
-  ftt?: string
-  flushTotalTime?: string
-  'get.current'?: string
-  gc?: string
-  getCurrent?: string
-  'get.time'?: string
-  gti?: string
-  getTime?: string
-  'get.total'?: string
-  gto?: string
-  getTotal?: string
-  'get.exists_time'?: string
-  geti?: string
-  getExistsTime?: string
-  'get.exists_total'?: string
-  geto?: string
-  getExistsTotal?: string
-  'get.missing_time'?: string
-  gmti?: string
-  getMissingTime?: string
-  'get.missing_total'?: string
-  gmto?: string
-  getMissingTotal?: string
-  'indexing.delete_current'?: string
-  idc?: string
-  indexingDeleteCurrent?: string
-  'indexing.delete_time'?: string
-  idti?: string
-  indexingDeleteTime?: string
-  'indexing.delete_total'?: string
-  idto?: string
-  indexingDeleteTotal?: string
-  'indexing.index_current'?: string
-  iic?: string
-  indexingIndexCurrent?: string
-  'indexing.index_time'?: string
-  iiti?: string
-  indexingIndexTime?: string
-  'indexing.index_total'?: string
-  iito?: string
-  indexingIndexTotal?: string
-  'indexing.index_failed'?: string
-  iif?: string
-  indexingIndexFailed?: string
-  'merges.current'?: string
-  mc?: string
-  mergesCurrent?: string
-  'merges.current_docs'?: string
-  mcd?: string
-  mergesCurrentDocs?: string
-  'merges.current_size'?: string
-  mcs?: string
-  mergesCurrentSize?: string
-  'merges.total'?: string
-  mt?: string
-  mergesTotal?: string
-  'merges.total_docs'?: string
-  mtd?: string
-  mergesTotalDocs?: string
-  'merges.total_size'?: string
-  mts?: string
-  mergesTotalSize?: string
-  'merges.total_time'?: string
-  mtt?: string
-  mergesTotalTime?: string
-  'refresh.total'?: string
-  'refresh.time'?: string
-  'refresh.external_total'?: string
-  rto?: string
-  refreshTotal?: string
-  'refresh.external_time'?: string
-  rti?: string
-  refreshTime?: string
-  'refresh.listeners'?: string
-  rli?: string
-  refreshListeners?: string
-  'search.fetch_current'?: string
-  sfc?: string
-  searchFetchCurrent?: string
-  'search.fetch_time'?: string
-  sfti?: string
-  searchFetchTime?: string
-  'search.fetch_total'?: string
-  sfto?: string
-  searchFetchTotal?: string
-  'search.open_contexts'?: string
-  so?: string
-  searchOpenContexts?: string
-  'search.query_current'?: string
-  sqc?: string
-  searchQueryCurrent?: string
-  'search.query_time'?: string
-  sqti?: string
-  searchQueryTime?: string
-  'search.query_total'?: string
-  sqto?: string
-  searchQueryTotal?: string
-  'search.scroll_current'?: string
-  scc?: string
-  searchScrollCurrent?: string
-  'search.scroll_time'?: string
-  scti?: string
-  searchScrollTime?: string
-  'search.scroll_total'?: string
-  scto?: string
-  searchScrollTotal?: string
-  'segments.count'?: string
-  sc?: string
-  segmentsCount?: string
-  'segments.memory'?: string
-  sm?: string
-  segmentsMemory?: string
-  'segments.index_writer_memory'?: string
-  siwm?: string
-  segmentsIndexWriterMemory?: string
-  'segments.version_map_memory'?: string
-  svmm?: string
-  segmentsVersionMapMemory?: string
-  'segments.fixed_bitset_memory'?: string
-  sfbm?: string
-  fixedBitsetMemory?: string
-  'seq_no.max'?: string
-  sqm?: string
-  maxSeqNo?: string
-  'seq_no.local_checkpoint'?: string
-  sql?: string
-  localCheckpoint?: string
-  'seq_no.global_checkpoint'?: string
-  sqg?: string
-  globalCheckpoint?: string
-  'warmer.current'?: string
-  wc?: string
-  warmerCurrent?: string
-  'warmer.total'?: string
-  wto?: string
-  warmerTotal?: string
-  'warmer.total_time'?: string
-  wtt?: string
-  warmerTotalTime?: string
-  'path.data'?: string
-  pd?: string
-  dataPath?: string
-  'path.state'?: string
-  ps?: string
-  statsPath?: string
-  'bulk.total_operations'?: string
-  bto?: string
-  bulkTotalOperations?: string
-  'bulk.total_time'?: string
-  btti?: string
-  bulkTotalTime?: string
-  'bulk.total_size_in_bytes'?: string
-  btsi?: string
-  bulkTotalSizeInBytes?: string
-  'bulk.avg_time'?: string
-  bati?: string
-  bulkAvgTime?: string
-  'bulk.avg_size_in_bytes'?: string
-  basi?: string
-  bulkAvgSizeInBytes?: string
-}
-
-export interface CatSnapshotsRequest extends CatCatRequestBase {
-  repository?: Names
-  ignore_unavailable?: boolean
-  h?: CatCatSnapshotsColumns
-  s?: Names
-  master_timeout?: Duration
-  time?: TimeUnit
-}
-
-export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[]
-
-export interface CatSnapshotsSnapshotsRecord {
-  id?: string
-  snapshot?: string
-  repository?: string
-  re?: string
-  repo?: string
-  status?: string
-  s?: string
-  start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  ste?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  start_time?: WatcherScheduleTimeOfDay
-  sti?: WatcherScheduleTimeOfDay
-  startTime?: WatcherScheduleTimeOfDay
-  end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  ete?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>>
-  end_time?: TimeOfDay
-  eti?: TimeOfDay
-  endTime?: TimeOfDay
-  duration?: Duration
-  dur?: Duration
-  indices?: string
-  i?: string
-  successful_shards?: string
-  ss?: string
-  failed_shards?: string
-  fs?: string
-  total_shards?: string
-  ts?: string
-  reason?: string
-  r?: string
-}
-
-export interface CatTasksRequest extends CatCatRequestBase {
-  actions?: string[]
-  detailed?: boolean
-  nodes?: string[]
-  parent_task_id?: string
-  h?: Names
-  s?: Names
-  time?: TimeUnit
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export type CatTasksResponse = CatTasksTasksRecord[]
-
-export interface CatTasksTasksRecord {
-  id?: Id
-  action?: string
-  ac?: string
-  task_id?: Id
-  ti?: Id
-  parent_task_id?: string
-  pti?: string
-  type?: string
-  ty?: string
-  start_time?: string
-  start?: string
-  timestamp?: string
-  ts?: string
-  hms?: string
-  hhmmss?: string
-  running_time_ns?: string
-  running_time?: string
-  time?: string
-  node_id?: NodeId
-  ni?: NodeId
-  ip?: string
-  i?: string
-  port?: string
-  po?: string
-  node?: string
-  n?: string
-  version?: VersionString
-  v?: VersionString
-  x_opaque_id?: string
-  x?: string
-  description?: string
-  desc?: string
-}
-
-export interface CatTemplatesRequest extends CatCatRequestBase {
-  name?: Name
-  h?: Names
-  s?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatTemplatesResponse = CatTemplatesTemplatesRecord[]
-
-export interface CatTemplatesTemplatesRecord {
-  name?: Name
-  n?: Name
-  index_patterns?: string
-  t?: string
-  order?: string
-  o?: string
-  p?: string
-  version?: VersionString | null
-  v?: VersionString | null
-  composed_of?: string
-  c?: string
-}
-
-export interface CatThreadPoolRequest extends CatCatRequestBase {
-  thread_pool_patterns?: Names
-  h?: CatCatThreadPoolColumns
-  s?: Names
-  time?: TimeUnit
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[]
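// Usage sketch, same hypothetical client and node URL as in the earlier sketch:
// in JSON mode, `cat.threadPool` returns an array of the thread-pool records
// defined just below. The pattern 'search' is an illustrative value.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const pools = await client.cat.threadPool({ thread_pool_patterns: 'search', format: 'json' })
for (const pool of pools) console.log(pool.name, pool.active, pool.queue)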
-
-export interface CatThreadPoolThreadPoolRecord {
-  node_name?: string
-  nn?: string
-  node_id?: NodeId
-  id?: NodeId
-  ephemeral_node_id?: string
-  eid?: string
-  pid?: string
-  p?: string
-  host?: string
-  h?: string
-  ip?: string
-  i?: string
-  port?: string
-  po?: string
-  name?: string
-  n?: string
-  type?: string
-  t?: string
-  active?: string
-  a?: string
-  pool_size?: string
-  psz?: string
-  queue?: string
-  q?: string
-  queue_size?: string
-  qs?: string
-  rejected?: string
-  r?: string
-  largest?: string
-  l?: string
-  completed?: string
-  c?: string
-  core?: string | null
-  cr?: string | null
-  max?: string | null
-  mx?: string | null
-  size?: string | null
-  sz?: string | null
-  keep_alive?: string | null
-  ka?: string | null
-}
-
-export interface CatTransformsRequest extends CatCatRequestBase {
-  transform_id?: Id
-  allow_no_match?: boolean
-  from?: integer
-  h?: CatCatTransformColumns
-  s?: CatCatTransformColumns
-  time?: TimeUnit
-  size?: integer
-}
-
-export type CatTransformsResponse = CatTransformsTransformsRecord[]
-
-export interface CatTransformsTransformsRecord {
-  id?: Id
-  state?: string
-  s?: string
-  checkpoint?: string
-  c?: string
-  documents_processed?: string
-  docp?: string
-  documentsProcessed?: string
-  checkpoint_progress?: string | null
-  cp?: string | null
-  checkpointProgress?: string | null
-  last_search_time?: string | null
-  lst?: string | null
-  lastSearchTime?: string | null
-  changes_last_detection_time?: string | null
-  cldt?: string | null
-  create_time?: string
-  ct?: string
-  createTime?: string
-  version?: VersionString
-  v?: VersionString
-  source_index?: string
-  si?: string
-  sourceIndex?: string
-  dest_index?: string
-  di?: string
-  destIndex?: string
-  pipeline?: string
-  p?: string
-  description?: string
-  d?: string
-  transform_type?: string
-  tt?: string
-  frequency?: string
-  f?: string
-  max_page_search_size?: string
-  mpsz?: string
-  docs_per_second?: string
-  dps?: string
-  reason?: string
-  r?: string
-  search_total?: string
-  st?: string
-  search_failure?: string
-  sf?: string
-  search_time?: string
-  stime?: string
-  index_total?: string
-  it?: string
-  index_failure?: string
-  if?: string
-  index_time?: string
-  itime?: string
-  documents_indexed?: string
-  doci?: string
-  delete_time?: string
-  dtime?: string
-  documents_deleted?: string
-  docd?: string
-  trigger_count?: string
-  tc?: string
-  pages_processed?: string
-  pp?: string
-  processing_time?: string
-  pt?: string
-  checkpoint_duration_time_exp_avg?: string
-  cdtea?: string
-  checkpointTimeExpAvg?: string
-  indexed_documents_exp_avg?: string
-  idea?: string
-  processed_documents_exp_avg?: string
-  pdea?: string
-}
-
-export interface CcrFollowIndexStats {
-  index: IndexName
-  shards: CcrShardStats[]
-}
-
-export interface CcrReadException {
-  exception: ErrorCause
-  from_seq_no: SequenceNumber
-  retries: integer
-}
-
-export interface CcrShardStats {
-  bytes_read: long
-  failed_read_requests: long
-  failed_write_requests: long
-  fatal_exception?: ErrorCause
-  follower_aliases_version: VersionNumber
-  follower_global_checkpoint: long
-  follower_index: string
-  follower_mapping_version: VersionNumber
-  follower_max_seq_no: SequenceNumber
-  follower_settings_version: VersionNumber
-  last_requested_seq_no: SequenceNumber
-  leader_global_checkpoint: long
-  leader_index: string
-  leader_max_seq_no: SequenceNumber
-  operations_read: long
-  operations_written: long
-  outstanding_read_requests: integer
-  outstanding_write_requests: integer
-  read_exceptions: CcrReadException[]
-  remote_cluster: string
-  shard_id: integer
-  successful_read_requests: long
-  successful_write_requests: long
-  time_since_last_read?: Duration
-  time_since_last_read_millis: DurationValue<UnitMillis>
-  total_read_remote_exec_time?: Duration
-  total_read_remote_exec_time_millis: DurationValue<UnitMillis>
-  total_read_time?: Duration
-  total_read_time_millis: DurationValue<UnitMillis>
-  total_write_time?: Duration
-  total_write_time_millis: DurationValue<UnitMillis>
-  write_buffer_operation_count: long
-  write_buffer_size_in_bytes: ByteSize
-}
-
-export interface CcrDeleteAutoFollowPatternRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase
-
-export interface CcrFollowRequest extends RequestBase {
-  index: IndexName
-  master_timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    data_stream_name?: string
-    leader_index: IndexName
-    max_outstanding_read_requests?: long
-    max_outstanding_write_requests?: integer
-    max_read_request_operation_count?: integer
-    max_read_request_size?: ByteSize
-    max_retry_delay?: Duration
-    max_write_buffer_count?: integer
-    max_write_buffer_size?: ByteSize
-    max_write_request_operation_count?: integer
-    max_write_request_size?: ByteSize
-    read_poll_timeout?: Duration
-    remote_cluster: string
-    settings?: IndicesIndexSettings
-  }
-}
-
-export interface CcrFollowResponse {
-  follow_index_created: boolean
-  follow_index_shards_acked: boolean
-  index_following_started: boolean
-}
-
-export interface CcrFollowInfoFollowerIndex {
-  follower_index: IndexName
-  leader_index: IndexName
-  parameters?: CcrFollowInfoFollowerIndexParameters
-  remote_cluster: Name
-  status: CcrFollowInfoFollowerIndexStatus
-}
-
-export interface CcrFollowInfoFollowerIndexParameters {
-  max_outstanding_read_requests?: long
-  max_outstanding_write_requests?: integer
-  max_read_request_operation_count?: integer
-  max_read_request_size?: ByteSize
-  max_retry_delay?: Duration
-  max_write_buffer_count?: integer
-  max_write_buffer_size?: ByteSize
-  max_write_request_operation_count?: integer
-  max_write_request_size?: ByteSize
-  read_poll_timeout?: Duration
-}
-
-export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused'
-
-export interface CcrFollowInfoRequest extends RequestBase {
-  index: Indices
-  master_timeout?: Duration
-}
-
-export interface CcrFollowInfoResponse {
-  follower_indices: CcrFollowInfoFollowerIndex[]
-}
-
-export interface CcrFollowStatsRequest extends RequestBase {
-  index: Indices
-  timeout?: Duration
-}
-
-export interface CcrFollowStatsResponse {
-  indices: CcrFollowIndexStats[]
-}
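// Usage sketch for the CcrFollowRequest shape above; index and cluster names are
// hypothetical, and the former nested `body` keys sit at the top level, as the
// deprecation notes in this diff describe.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const resp = await client.ccr.follow({
  index: 'follower-index',
  leader_index: 'leader-index',
  remote_cluster: 'remote-cluster',
  wait_for_active_shards: 1
})
console.log(resp.follow_index_created, resp.index_following_started)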
-
-export interface CcrForgetFollowerRequest extends RequestBase {
-  index: IndexName
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    follower_cluster?: string
-    follower_index?: IndexName
-    follower_index_uuid?: Uuid
-    leader_remote_cluster?: string
-  }
-}
-
-export interface CcrForgetFollowerResponse {
-  _shards: ShardStatistics
-}
-
-export interface CcrGetAutoFollowPatternAutoFollowPattern {
-  name: Name
-  pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary
-}
-
-export interface CcrGetAutoFollowPatternAutoFollowPatternSummary {
-  active: boolean
-  remote_cluster: string
-  follow_index_pattern?: IndexPattern
-  leader_index_patterns: IndexPatterns
-  leader_index_exclusion_patterns: IndexPatterns
-  max_outstanding_read_requests: integer
-}
-
-export interface CcrGetAutoFollowPatternRequest extends RequestBase {
-  name?: Name
-  master_timeout?: Duration
-}
-
-export interface CcrGetAutoFollowPatternResponse {
-  patterns: CcrGetAutoFollowPatternAutoFollowPattern[]
-}
-
-export interface CcrPauseAutoFollowPatternRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase
-
-export interface CcrPauseFollowRequest extends RequestBase {
-  index: IndexName
-  master_timeout?: Duration
-}
-
-export type CcrPauseFollowResponse = AcknowledgedResponseBase
-
-export interface CcrPutAutoFollowPatternRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    remote_cluster: string
-    follow_index_pattern?: IndexPattern
-    leader_index_patterns?: IndexPatterns
-    leader_index_exclusion_patterns?: IndexPatterns
-    max_outstanding_read_requests?: integer
-    settings?: Record
-    max_outstanding_write_requests?: integer
-    read_poll_timeout?: Duration
-    max_read_request_operation_count?: integer
-    max_read_request_size?: ByteSize
-    max_retry_delay?: Duration
-    max_write_buffer_count?: integer
-    max_write_buffer_size?: ByteSize
-    max_write_request_operation_count?: integer
-    max_write_request_size?: ByteSize
-  }
-}
-
-export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase
-
-export interface CcrResumeAutoFollowPatternRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase
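// Sketch of CcrPutAutoFollowPatternRequest in use; the pattern name, remote
// cluster alias, and index patterns are all hypothetical values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.ccr.putAutoFollowPattern({
  name: 'logs-pattern',
  remote_cluster: 'remote-cluster',
  leader_index_patterns: ['logs-*'],
  follow_index_pattern: '{{leader_index}}-copy'
})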
-
-export interface CcrResumeFollowRequest extends RequestBase {
-  index: IndexName
-  master_timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    max_outstanding_read_requests?: long
-    max_outstanding_write_requests?: long
-    max_read_request_operation_count?: long
-    max_read_request_size?: string
-    max_retry_delay?: Duration
-    max_write_buffer_count?: long
-    max_write_buffer_size?: string
-    max_write_request_operation_count?: long
-    max_write_request_size?: string
-    read_poll_timeout?: Duration
-  }
-}
-
-export type CcrResumeFollowResponse = AcknowledgedResponseBase
-
-export interface CcrStatsAutoFollowStats {
-  auto_followed_clusters: CcrStatsAutoFollowedCluster[]
-  number_of_failed_follow_indices: long
-  number_of_failed_remote_cluster_state_requests: long
-  number_of_successful_follow_indices: long
-  recent_auto_follow_errors: ErrorCause[]
-}
-
-export interface CcrStatsAutoFollowedCluster {
-  cluster_name: Name
-  last_seen_metadata_version: VersionNumber
-  time_since_last_check_millis: DurationValue<UnitMillis>
-}
-
-export interface CcrStatsFollowStats {
-  indices: CcrFollowIndexStats[]
-}
-
-export interface CcrStatsRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface CcrStatsResponse {
-  auto_follow_stats: CcrStatsAutoFollowStats
-  follow_stats: CcrStatsFollowStats
-}
-
-export interface CcrUnfollowRequest extends RequestBase {
-  index: IndexName
-  master_timeout?: Duration
-}
-
-export type CcrUnfollowResponse = AcknowledgedResponseBase
-
-export interface ClusterComponentTemplate {
-  name: Name
-  component_template: ClusterComponentTemplateNode
-}
-
-export interface ClusterComponentTemplateNode {
-  template: ClusterComponentTemplateSummary
-  version?: VersionNumber
-  _meta?: Metadata
-  deprecated?: boolean
-}
-
-export interface ClusterComponentTemplateSummary {
-  _meta?: Metadata
-  version?: VersionNumber
-  settings?: Record
-  mappings?: MappingTypeMapping
-  aliases?: Record
-  lifecycle?: IndicesDataStreamLifecycleWithRollover
-}
-
-export interface ClusterAllocationExplainAllocationDecision {
-  decider: string
-  decision: ClusterAllocationExplainAllocationExplainDecision
-  explanation: string
-}
-
-export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS'
-
-export interface ClusterAllocationExplainAllocationStore {
-  allocation_id: string
-  found: boolean
-  in_sync: boolean
-  matching_size_in_bytes: long
-  matching_sync_id: boolean
-  store_exception: string
-}
-
-export interface ClusterAllocationExplainClusterInfo {
-  nodes: Record
-  shard_sizes: Record
-  shard_data_set_sizes?: Record
-  shard_paths: Record
-  reserved_sizes: ClusterAllocationExplainReservedSize[]
-}
-
-export interface ClusterAllocationExplainCurrentNode {
-  id: Id
-  name: Name
-  roles: NodeRoles
-  attributes: Record
-  transport_address: TransportAddress
-  weight_ranking: integer
-}
-
-export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt'
-
-export interface ClusterAllocationExplainDiskUsage {
-  path: string
-  total_bytes: long
-  used_bytes: long
-  free_bytes: long
-  free_disk_percent: double
-  used_disk_percent: double
-}
-
-export interface ClusterAllocationExplainNodeAllocationExplanation {
-  deciders: ClusterAllocationExplainAllocationDecision[]
-  node_attributes: Record
-  node_decision: ClusterAllocationExplainDecision
-  node_id: Id
-  node_name: Name
-  roles: NodeRoles
-  store?: ClusterAllocationExplainAllocationStore
-  transport_address: TransportAddress
-  weight_ranking: integer
-}
-
-export interface ClusterAllocationExplainNodeDiskUsage {
-  node_name: Name
-  least_available: ClusterAllocationExplainDiskUsage
-  most_available: ClusterAllocationExplainDiskUsage
-}
-
-export interface ClusterAllocationExplainRequest extends RequestBase {
-  include_disk_info?: boolean
-  include_yes_decisions?: boolean
-  master_timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    current_node?: string
-    index?: IndexName
-    primary?: boolean
-    shard?: integer
-  }
-}
-
-export interface ClusterAllocationExplainReservedSize {
-  node_id: Id
-  path: string
-  total: long
-  shards: string[]
-}
-
-export interface ClusterAllocationExplainResponse {
-  allocate_explanation?: string
-  allocation_delay?: Duration
-  allocation_delay_in_millis?: DurationValue<UnitMillis>
-  can_allocate?: ClusterAllocationExplainDecision
-  can_move_to_other_node?: ClusterAllocationExplainDecision
-  can_rebalance_cluster?: ClusterAllocationExplainDecision
-  can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[]
-  can_rebalance_to_other_node?: ClusterAllocationExplainDecision
-  can_remain_decisions?: ClusterAllocationExplainAllocationDecision[]
-  can_remain_on_current_node?: ClusterAllocationExplainDecision
-  cluster_info?: ClusterAllocationExplainClusterInfo
-  configured_delay?: Duration
-  configured_delay_in_millis?: DurationValue<UnitMillis>
-  current_node?: ClusterAllocationExplainCurrentNode
-  current_state: string
-  index: IndexName
-  move_explanation?: string
-  node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[]
-  primary: boolean
-  rebalance_explanation?: string
-  remaining_delay?: Duration
-  remaining_delay_in_millis?: DurationValue<UnitMillis>
-  shard: integer
-  unassigned_info?: ClusterAllocationExplainUnassignedInformation
-  note?: string
-}
-
-export interface ClusterAllocationExplainUnassignedInformation {
-  at: DateTime
-  last_allocation_status?: string
-  reason: ClusterAllocationExplainUnassignedInformationReason
-  details?: string
-  failed_allocation_attempts?: integer
-  delayed?: boolean
-  allocation_status?: string
-}
-
-export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION'
-
-export interface ClusterDeleteComponentTemplateRequest extends RequestBase {
-  name: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase
-
-export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase {
-  master_timeout?: Duration
-  wait_for_removal?: boolean
-}
-
-export type ClusterDeleteVotingConfigExclusionsResponse = boolean
-
-export interface ClusterExistsComponentTemplateRequest extends RequestBase {
-  name: Names
-  master_timeout?: Duration
-  local?: boolean
-}
-
-export type ClusterExistsComponentTemplateResponse = boolean
-
-export interface ClusterGetComponentTemplateRequest extends RequestBase {
-  name?: Name
-  flat_settings?: boolean
-  settings_filter?: string | string[]
-  include_defaults?: boolean
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export interface ClusterGetComponentTemplateResponse {
-  component_templates: ClusterComponentTemplate[]
-}
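// Sketch of the ClusterAllocationExplainRequest/Response pair above, asking why a
// hypothetical primary shard is where it is; index name and shard number are examples.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const explain = await client.cluster.allocationExplain({
  index: 'my-index',
  shard: 0,
  primary: true
})
console.log(explain.can_allocate, explain.allocate_explanation)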
-export interface ClusterGetSettingsRequest extends RequestBase {
-  flat_settings?: boolean
-  include_defaults?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface ClusterGetSettingsResponse {
-  persistent: Record
-  transient: Record
-  defaults?: Record
-}
-
-export interface ClusterHealthHealthResponseBody {
-  active_primary_shards: integer
-  active_shards: integer
-  active_shards_percent?: string
-  active_shards_percent_as_number: double
-  cluster_name: Name
-  delayed_unassigned_shards: integer
-  indices?: Record
-  initializing_shards: integer
-  number_of_data_nodes: integer
-  number_of_in_flight_fetch: integer
-  number_of_nodes: integer
-  number_of_pending_tasks: integer
-  relocating_shards: integer
-  status: HealthStatus
-  task_max_waiting_in_queue?: Duration
-  task_max_waiting_in_queue_millis: DurationValue<UnitMillis>
-  timed_out: boolean
-  unassigned_primary_shards: integer
-  unassigned_shards: integer
-}
-
-export interface ClusterHealthIndexHealthStats {
-  active_primary_shards: integer
-  active_shards: integer
-  initializing_shards: integer
-  number_of_replicas: integer
-  number_of_shards: integer
-  relocating_shards: integer
-  shards?: Record
-  status: HealthStatus
-  unassigned_shards: integer
-  unassigned_primary_shards: integer
-}
-
-export interface ClusterHealthRequest extends RequestBase {
-  index?: Indices
-  expand_wildcards?: ExpandWildcards
-  level?: Level
-  local?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  wait_for_events?: WaitForEvents
-  wait_for_nodes?: ClusterHealthWaitForNodes
-  wait_for_no_initializing_shards?: boolean
-  wait_for_no_relocating_shards?: boolean
-  wait_for_status?: HealthStatus
-}
-
-export type ClusterHealthResponse = ClusterHealthHealthResponseBody
-
-export interface ClusterHealthShardHealthStats {
-  active_shards: integer
-  initializing_shards: integer
-  primary_active: boolean
-  relocating_shards: integer
-  status: HealthStatus
-  unassigned_shards: integer
-  unassigned_primary_shards: integer
-}
-
-export type ClusterHealthWaitForNodes = string | integer
-
-export interface ClusterInfoRequest extends RequestBase {
-  target: ClusterInfoTargets
-}
-
-export interface ClusterInfoResponse {
-  cluster_name: Name
-  http?: NodesHttp
-  ingest?: NodesIngest
-  thread_pool?: Record
-  script?: NodesScripting
-}
-
-export interface ClusterPendingTasksPendingTask {
-  executing: boolean
-  insert_order: integer
-  priority: string
-  source: string
-  time_in_queue?: Duration
-  time_in_queue_millis: DurationValue<UnitMillis>
-}
-
-export interface ClusterPendingTasksRequest extends RequestBase {
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export interface ClusterPendingTasksResponse {
-  tasks: ClusterPendingTasksPendingTask[]
-}
-
-export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase {
-  node_names?: Names
-  node_ids?: Ids
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type ClusterPostVotingConfigExclusionsResponse = boolean
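// Sketch of ClusterHealthRequest in use: block until the cluster reaches at least
// yellow, or give up after the (hypothetical) 30-second timeout.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })
console.log(health.status, health.unassigned_shards)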
-
-export interface ClusterPutComponentTemplateRequest extends RequestBase {
-  name: Name
-  create?: boolean
-  cause?: string
-  master_timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    template: IndicesIndexState
-    version?: VersionNumber
-    _meta?: Metadata
-    deprecated?: boolean
-  }
-}
-
-export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase
-
-export interface ClusterPutSettingsRequest extends RequestBase {
-  flat_settings?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    persistent?: Record
-    transient?: Record
-  }
-}
-
-export interface ClusterPutSettingsResponse {
-  acknowledged: boolean
-  persistent: Record
-  transient: Record
-}
-
-export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo
-
-export interface ClusterRemoteInfoClusterRemoteProxyInfo {
-  mode: 'proxy'
-  connected: boolean
-  initial_connect_timeout: Duration
-  skip_unavailable: boolean
-  proxy_address: string
-  server_name: string
-  num_proxy_sockets_connected: integer
-  max_proxy_socket_connections: integer
-  cluster_credentials?: string
-}
-
-export interface ClusterRemoteInfoClusterRemoteSniffInfo {
-  mode: 'sniff'
-  connected: boolean
-  max_connections_per_cluster: integer
-  num_nodes_connected: long
-  initial_connect_timeout: Duration
-  skip_unavailable: boolean
-  seeds: string[]
-}
-
-export interface ClusterRemoteInfoRequest extends RequestBase {
-}
-
-export type ClusterRemoteInfoResponse = Record
-
-export interface ClusterRerouteCommand {
-  cancel?: ClusterRerouteCommandCancelAction
-  move?: ClusterRerouteCommandMoveAction
-  allocate_replica?: ClusterRerouteCommandAllocateReplicaAction
-  allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction
-  allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction
-}
-
-export interface ClusterRerouteCommandAllocatePrimaryAction {
-  index: IndexName
-  shard: integer
-  node: string
-  accept_data_loss: boolean
-}
-
-export interface ClusterRerouteCommandAllocateReplicaAction {
-  index: IndexName
-  shard: integer
-  node: string
-}
-
-export interface ClusterRerouteCommandCancelAction {
-  index: IndexName
-  shard: integer
-  node: string
-  allow_primary?: boolean
-}
-
-export interface ClusterRerouteCommandMoveAction {
-  index: IndexName
-  shard: integer
-  from_node: string
-  to_node: string
-}
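// Sketch of ClusterPutComponentTemplateRequest: the former `body.template` now
// sits at the top level of the request. Template name and settings are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.cluster.putComponentTemplate({
  name: 'my-settings-component',
  template: { settings: { number_of_shards: 1 } }
})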
-
-export interface ClusterRerouteRequest extends RequestBase {
-  dry_run?: boolean
-  explain?: boolean
-  metric?: Metrics
-  retry_failed?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    commands?: ClusterRerouteCommand[]
-  }
-}
-
-export interface ClusterRerouteRerouteDecision {
-  decider: string
-  decision: string
-  explanation: string
-}
-
-export interface ClusterRerouteRerouteExplanation {
-  command: string
-  decisions: ClusterRerouteRerouteDecision[]
-  parameters: ClusterRerouteRerouteParameters
-}
-
-export interface ClusterRerouteRerouteParameters {
-  allow_primary: boolean
-  index: IndexName
-  node: NodeName
-  shard: integer
-  from_node?: NodeName
-  to_node?: NodeName
-}
-
-export interface ClusterRerouteResponse {
-  acknowledged: boolean
-  explanations?: ClusterRerouteRerouteExplanation[]
-  state?: any
-}
-
-export interface ClusterStateRequest extends RequestBase {
-  metric?: Metrics
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  flat_settings?: boolean
-  ignore_unavailable?: boolean
-  local?: boolean
-  master_timeout?: Duration
-  wait_for_metadata_version?: VersionNumber
-  wait_for_timeout?: Duration
-}
-
-export type ClusterStateResponse = any
-
-export interface ClusterStatsCCSStats {
-  clusters?: Record
-  _search: ClusterStatsCCSUsageStats
-  _esql?: ClusterStatsCCSUsageStats
-}
-
-export interface ClusterStatsCCSUsageClusterStats {
-  total: integer
-  skipped: integer
-  took: ClusterStatsCCSUsageTimeValue
-}
-
-export interface ClusterStatsCCSUsageStats {
-  total: integer
-  success: integer
-  skipped: integer
-  took: ClusterStatsCCSUsageTimeValue
-  took_mrt_true?: ClusterStatsCCSUsageTimeValue
-  took_mrt_false?: ClusterStatsCCSUsageTimeValue
-  remotes_per_search_max: integer
-  remotes_per_search_avg: double
-  failure_reasons: Record
-  features: Record
-  clients: Record
-  clusters: Record
-}
-
-export interface ClusterStatsCCSUsageTimeValue {
-  max: DurationValue
-  avg: DurationValue
-  p90: DurationValue
-}
-
-export interface ClusterStatsCharFilterTypes {
-  analyzer_types: ClusterStatsFieldTypes[]
-  built_in_analyzers: ClusterStatsFieldTypes[]
-  built_in_char_filters: ClusterStatsFieldTypes[]
-  built_in_filters: ClusterStatsFieldTypes[]
-  built_in_tokenizers: ClusterStatsFieldTypes[]
-  char_filter_types: ClusterStatsFieldTypes[]
-  filter_types: ClusterStatsFieldTypes[]
-  tokenizer_types: ClusterStatsFieldTypes[]
-  synonyms: Record
-}
-
-export interface ClusterStatsClusterFileSystem {
-  path?: string
-  mount?: string
-  type?: string
-  available_in_bytes?: long
-  available?: ByteSize
-  free_in_bytes?: long
-  free?: ByteSize
-  total_in_bytes?: long
-  total?: ByteSize
-  low_watermark_free_space?: ByteSize
-  low_watermark_free_space_in_bytes?: long
-  high_watermark_free_space?: ByteSize
-  high_watermark_free_space_in_bytes?: long
-  flood_stage_free_space?: ByteSize
-  flood_stage_free_space_in_bytes?: long
-  frozen_flood_stage_free_space?: ByteSize
-  frozen_flood_stage_free_space_in_bytes?: long
-}
-
-export interface ClusterStatsClusterIndices {
-  analysis?: ClusterStatsCharFilterTypes
-  completion: CompletionStats
-  count: long
-  docs: DocStats
-  fielddata: FielddataStats
-  query_cache: QueryCacheStats
-  search: ClusterStatsSearchUsageStats
-  segments: SegmentsStats
-  shards: ClusterStatsClusterIndicesShards
-  store: StoreStats
-  mappings?: ClusterStatsFieldTypesMappings
-  versions?: ClusterStatsIndicesVersions[]
-  dense_vector: ClusterStatsDenseVectorStats
-  sparse_vector: ClusterStatsSparseVectorStats
-}
-
-export interface ClusterStatsClusterIndicesShards {
-  index?: ClusterStatsClusterIndicesShardsIndex
-  primaries?: double
-  replication?: double
-  total?: double
-}
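// Sketch of the ClusterRerouteCommand shapes above: move one shard between two
// hypothetical nodes. Index, shard, and node names are illustrative only.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.cluster.reroute({
  commands: [{ move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }]
})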
-
-export interface ClusterStatsClusterIndicesShardsIndex {
-  primaries: ClusterStatsClusterShardMetrics
-  replication: ClusterStatsClusterShardMetrics
-  shards: ClusterStatsClusterShardMetrics
-}
-
-export interface ClusterStatsClusterIngest {
-  number_of_pipelines: integer
-  processor_stats: Record
-}
-
-export interface ClusterStatsClusterJvm {
-  max_uptime_in_millis: DurationValue<UnitMillis>
-  max_uptime?: Duration
-  mem: ClusterStatsClusterJvmMemory
-  threads: long
-  versions: ClusterStatsClusterJvmVersion[]
-}
-
-export interface ClusterStatsClusterJvmMemory {
-  heap_max_in_bytes: long
-  heap_max?: ByteSize
-  heap_used_in_bytes: long
-  heap_used?: ByteSize
-}
-
-export interface ClusterStatsClusterJvmVersion {
-  bundled_jdk: boolean
-  count: integer
-  using_bundled_jdk: boolean
-  version: VersionString
-  vm_name: string
-  vm_vendor: string
-  vm_version: VersionString
-}
-
-export interface ClusterStatsClusterNetworkTypes {
-  http_types: Record
-  transport_types: Record
-}
-
-export interface ClusterStatsClusterNodeCount {
-  total: integer
-  coordinating_only?: integer
-  data?: integer
-  data_cold?: integer
-  data_content?: integer
-  data_frozen?: integer
-  data_hot?: integer
-  data_warm?: integer
-  index?: integer
-  ingest?: integer
-  master?: integer
-  ml?: integer
-  remote_cluster_client?: integer
-  search?: integer
-  transform?: integer
-  voting_only?: integer
-}
-
-export interface ClusterStatsClusterNodes {
-  count: ClusterStatsClusterNodeCount
-  discovery_types: Record
-  fs: ClusterStatsClusterFileSystem
-  indexing_pressure: ClusterStatsIndexingPressure
-  ingest: ClusterStatsClusterIngest
-  jvm: ClusterStatsClusterJvm
-  network_types: ClusterStatsClusterNetworkTypes
-  os: ClusterStatsClusterOperatingSystem
-  packaging_types: ClusterStatsNodePackagingType[]
-  plugins: PluginStats[]
-  process: ClusterStatsClusterProcess
-  versions: VersionString[]
-}
-
-export interface ClusterStatsClusterOperatingSystem {
-  allocated_processors: integer
-  architectures?: ClusterStatsClusterOperatingSystemArchitecture[]
-  available_processors: integer
-  mem: ClusterStatsOperatingSystemMemoryInfo
-  names: ClusterStatsClusterOperatingSystemName[]
-  pretty_names: ClusterStatsClusterOperatingSystemPrettyName[]
-}
-
-export interface ClusterStatsClusterOperatingSystemArchitecture {
-  arch: string
-  count: integer
-}
-
-export interface ClusterStatsClusterOperatingSystemName {
-  count: integer
-  name: Name
-}
-
-export interface ClusterStatsClusterOperatingSystemPrettyName {
-  count: integer
-  pretty_name: Name
-}
-
-export interface ClusterStatsClusterProcess {
-  cpu: ClusterStatsClusterProcessCpu
-  open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors
-}
-
-export interface ClusterStatsClusterProcessCpu {
-  percent: integer
-}
-
-export interface ClusterStatsClusterProcessOpenFileDescriptors {
-  avg: long
-  max: long
-  min: long
-}
-
-export interface ClusterStatsClusterProcessor {
-  count: long
-  current: long
-  failed: long
-  time?: Duration
-  time_in_millis: DurationValue<UnitMillis>
-}
-
-export interface ClusterStatsClusterShardMetrics {
-  avg: double
-  max: double
-  min: double
-}
-
-export interface ClusterStatsClusterSnapshotStats {
-  current_counts: ClusterStatsSnapshotCurrentCounts
-  repositories: Record
-}
-
-export interface ClusterStatsDenseVectorOffHeapStats {
-  total_size_bytes: long
-  total_size?: ByteSize
-  total_veb_size_bytes: long
-  total_veb_size?: ByteSize
-  total_vec_size_bytes: long
-  total_vec_size?: ByteSize
-  total_veq_size_bytes: long
-  total_veq_size?: ByteSize
-  total_vex_size_bytes: long
-  total_vex_size?: ByteSize
-  fielddata?: Record<string, Record<string, long>>
-}
-
-export interface ClusterStatsDenseVectorStats {
-  value_count: long
-  off_heap?: ClusterStatsDenseVectorOffHeapStats
-}
-
-export interface ClusterStatsFieldTypes {
-  name: Name
-  count: integer
-  index_count: integer
-  indexed_vector_count?: integer
-  indexed_vector_dim_max?: integer
-  indexed_vector_dim_min?: integer
-  script_count?: integer
-  vector_index_type_count?: Record
-  vector_similarity_type_count?: Record
-  vector_element_type_count?: Record
-}
-
-export interface ClusterStatsFieldTypesMappings {
-  field_types: ClusterStatsFieldTypes[]
-  runtime_field_types: ClusterStatsRuntimeFieldTypes[]
-  total_field_count?: long
-  total_deduplicated_field_count?: long
-  total_deduplicated_mapping_size?: ByteSize
-  total_deduplicated_mapping_size_in_bytes?: long
-  source_modes: Record
-}
-
-export interface ClusterStatsIndexingPressure {
-  memory: NodesIndexingPressureMemory
-}
-
-export interface ClusterStatsIndicesVersions {
-  index_count: integer
-  primary_shard_count: integer
-  total_primary_bytes: long
-  total_primary_size?: ByteSize
-  version: VersionString
-}
-
-export interface ClusterStatsNodePackagingType {
-  count: integer
-  flavor: string
-  type: string
-}
-
-export interface ClusterStatsOperatingSystemMemoryInfo {
-  adjusted_total_in_bytes?: long
-  adjusted_total?: ByteSize
-  free_in_bytes: long
-  free?: ByteSize
-  free_percent: integer
-  total_in_bytes: long
-  total?: ByteSize
-  used_in_bytes: long
-  used?: ByteSize
-  used_percent: integer
-}
-
-export interface ClusterStatsPerRepositoryStats {
-  type: string
-  oldest_start_time_millis: UnitMillis
-  oldest_start_time?: DateFormat
-  current_counts: ClusterStatsRepositoryStatsCurrentCounts
-}
-
-export interface ClusterStatsRemoteClusterInfo {
-  cluster_uuid: string
-  mode: string
-  skip_unavailable: boolean
-  'transport.compress': string
-  status: HealthStatus
-  version: VersionString[]
-  nodes_count: integer
-  shards_count: integer
-  indices_count: integer
-  indices_total_size_in_bytes: long
-  indices_total_size?: string
-  max_heap_in_bytes: long
-  max_heap?: string
-  mem_total_in_bytes: long
-  mem_total?: string
-}
-
-export interface ClusterStatsRepositoryStatsCurrentCounts {
-  snapshots: integer
-  clones: integer
-  finalizations: integer
-  deletions: integer
-  snapshot_deletions: integer
-  active_deletions: integer
-  shards: ClusterStatsRepositoryStatsShards
-}
-
-export interface ClusterStatsRepositoryStatsShards {
-  total: integer
-  complete: integer
-  incomplete: integer
-  states: Record
-}
-
-export interface ClusterStatsRequest extends RequestBase {
-  node_id?: NodeIds
-  include_remotes?: boolean
-  timeout?: Duration
-}
-
-export type ClusterStatsResponse = ClusterStatsStatsResponseBase
-
-export interface ClusterStatsRuntimeFieldTypes {
-  chars_max: integer
-  chars_total: integer
-  count: integer
-  doc_max: integer
-  doc_total: integer
-  index_count: integer
-  lang: string[]
-  lines_max: integer
-  lines_total: integer
-  name: Name
-  scriptless_count: integer
-  shadowed_count: integer
-  source_max: integer
-  source_total: integer
-}
-
-export interface ClusterStatsSearchUsageStats {
-  total: long
-  queries: Record
-  rescorers: Record
-  sections: Record
-  retrievers: Record
-}
-
-export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL'
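// Sketch reading a few of the ClusterStats* fields defined above; same hypothetical
// client as in the earlier sketches.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const stats = await client.cluster.stats()
console.log(stats.cluster_name, stats.nodes.count.total, stats.indices.count)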
-
-export interface ClusterStatsSnapshotCurrentCounts {
-  snapshots: integer
-  shard_snapshots: integer
-  snapshot_deletions: integer
-  concurrent_operations: integer
-  cleanups: integer
-}
-
-export interface ClusterStatsSparseVectorStats {
-  value_count: long
-}
-
-export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  cluster_uuid: Uuid
-  indices: ClusterStatsClusterIndices
-  nodes: ClusterStatsClusterNodes
-  repositories: Record<string, Record<string, ClusterStatsPerRepositoryStats>>
-  snapshots: ClusterStatsClusterSnapshotStats
-  status?: HealthStatus
-  timestamp: long
-  ccs: ClusterStatsCCSStats
-}
-
-export interface ClusterStatsSynonymsStats {
-  count: integer
-  index_count: integer
-}
-
-export interface ConnectorConnector {
-  api_key_id?: string
-  api_key_secret_id?: string
-  configuration: ConnectorConnectorConfiguration
-  custom_scheduling: ConnectorConnectorCustomScheduling
-  description?: string
-  error?: string | null
-  features?: ConnectorConnectorFeatures
-  filtering: ConnectorFilteringConfig[]
-  id?: Id
-  index_name?: IndexName | null
-  is_native: boolean
-  language?: string
-  last_access_control_sync_error?: string
-  last_access_control_sync_scheduled_at?: DateTime
-  last_access_control_sync_status?: ConnectorSyncStatus
-  last_deleted_document_count?: long
-  last_incremental_sync_scheduled_at?: DateTime
-  last_indexed_document_count?: long
-  last_seen?: DateTime
-  last_sync_error?: string
-  last_sync_scheduled_at?: DateTime
-  last_sync_status?: ConnectorSyncStatus
-  last_synced?: DateTime
-  name?: string
-  pipeline?: ConnectorIngestPipelineParams
-  scheduling: ConnectorSchedulingConfiguration
-  service_type?: string
-  status: ConnectorConnectorStatus
-  sync_cursor?: any
-  sync_now: boolean
-}
-
-export interface ConnectorConnectorConfigProperties {
-  category?: string
-  default_value: ScalarValue
-  depends_on: ConnectorDependency[]
-  display: ConnectorDisplayType
-  label: string
-  options: ConnectorSelectOption[]
-  order?: integer
-  placeholder?: string
-  required: boolean
-  sensitive: boolean
-  tooltip?: string | null
-  type?: ConnectorConnectorFieldType
-  ui_restrictions?: string[]
-  validations?: ConnectorValidation[]
-  value: any
-}
-
-export type ConnectorConnectorConfiguration = Record
-
-export type ConnectorConnectorCustomScheduling = Record
-
-export interface ConnectorConnectorFeatures {
-  document_level_security?: ConnectorFeatureEnabled
-  incremental_sync?: ConnectorFeatureEnabled
-  native_connector_api_keys?: ConnectorFeatureEnabled
-  sync_rules?: ConnectorSyncRulesFeature
-}
-
-export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool'
-
-export interface ConnectorConnectorScheduling {
-  enabled: boolean
-  interval: string
-}
-
-export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error'
-
-export interface ConnectorConnectorSyncJob {
-  cancelation_requested_at?: DateTime
-  canceled_at?: DateTime
-  completed_at?: DateTime
-  connector: ConnectorSyncJobConnectorReference
-  created_at: DateTime
-  deleted_document_count: long
-  error?: string
-  id: Id
-  indexed_document_count: long
-  indexed_document_volume: long
-  job_type: ConnectorSyncJobType
-  last_seen?: DateTime
-  metadata: Record
-  started_at?: DateTime
-  status: ConnectorSyncStatus
-  total_document_count: long
-  trigger_method: ConnectorSyncJobTriggerMethod
-  worker_hostname?: string
-}
-
-export interface ConnectorCustomScheduling {
-  configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides
-  enabled: boolean
-  interval: string
-  last_synced?: DateTime
-  name: string
-}
-
-export interface ConnectorCustomSchedulingConfigurationOverrides {
-  max_crawl_depth?: integer
-  sitemap_discovery_disabled?: boolean
-  domain_allowlist?: string[]
-  sitemap_urls?: string[]
-  seed_urls?: string[]
-}
-
-export interface ConnectorDependency {
-  field: string
-  value: ScalarValue
-}
-
-export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown'
-
-export interface ConnectorFeatureEnabled {
-  enabled: boolean
-}
-
-export interface ConnectorFilteringAdvancedSnippet {
-  created_at?: DateTime
-  updated_at?: DateTime
-  value: any
-}
-
-export interface ConnectorFilteringConfig {
-  active: ConnectorFilteringRules
-  domain?: string
-  draft: ConnectorFilteringRules
-}
-
-export type ConnectorFilteringPolicy = 'exclude' | 'include'
-
-export interface ConnectorFilteringRule {
-  created_at?: DateTime
-  field: Field
-  id: Id
-  order: integer
-  policy: ConnectorFilteringPolicy
-  rule: ConnectorFilteringRuleRule
-  updated_at?: DateTime
-  value: string
-}
-
-export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<'
-
-export interface ConnectorFilteringRules {
-  advanced_snippet: ConnectorFilteringAdvancedSnippet
-  rules: ConnectorFilteringRule[]
-  validation: ConnectorFilteringRulesValidation
-}
-
-export interface ConnectorFilteringRulesValidation {
-  errors: ConnectorFilteringValidation[]
-  state: ConnectorFilteringValidationState
-}
-
-export interface ConnectorFilteringValidation {
-  ids: Id[]
-  messages: string[]
-}
-
-export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid'
-
-export interface ConnectorGreaterThanValidation {
-  type: 'greater_than'
-  constraint: double
-}
-
-export interface ConnectorIncludedInValidation {
-  type: 'included_in'
-  constraint: ScalarValue[]
-}
-
-export interface ConnectorIngestPipelineParams {
-  extract_binary_content: boolean
-  name: string
-  reduce_whitespace: boolean
-  run_ml_inference: boolean
-}
-
-export interface ConnectorLessThanValidation {
-  type: 'less_than'
-  constraint: double
-}
-
-export interface ConnectorListTypeValidation {
-  type: 'list_type'
-  constraint: string
-}
-
-export interface ConnectorRegexValidation {
-  type: 'regex'
-  constraint: string
-}
-
-export interface ConnectorSchedulingConfiguration {
-  access_control?: ConnectorConnectorScheduling
-  full?: ConnectorConnectorScheduling
-  incremental?: ConnectorConnectorScheduling
-}
-
-export interface ConnectorSelectOption {
-  label: string
-  value: ScalarValue
-}
-
-export interface ConnectorSyncJobConnectorReference {
-  configuration: ConnectorConnectorConfiguration
-  filtering: ConnectorFilteringRules
-  id: Id
-  index_name: string
-  language?: string
-  pipeline?: ConnectorIngestPipelineParams
-  service_type: string
-  sync_cursor?: any
-}
-
-export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled'
-
-export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control'
-
-export interface ConnectorSyncRulesFeature {
-  advanced?: ConnectorFeatureEnabled
-  basic?: ConnectorFeatureEnabled
-}
-
-export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended'
-
-export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation
-
-export interface ConnectorCheckInRequest extends RequestBase {
-  connector_id: Id
-}
-
-export interface ConnectorCheckInResponse {
-  result: Result
-}
-
-export interface ConnectorDeleteRequest extends RequestBase {
-  connector_id: Id
-  delete_sync_jobs?: boolean
-}
-
-export type ConnectorDeleteResponse = AcknowledgedResponseBase
-
-export interface ConnectorGetRequest extends RequestBase {
-  connector_id: Id
-}
-
-export type ConnectorGetResponse = ConnectorConnector
-
-export interface ConnectorLastSyncRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    last_access_control_sync_error?: string
-    last_access_control_sync_scheduled_at?: DateTime
-    last_access_control_sync_status?: ConnectorSyncStatus
-    last_deleted_document_count?: long
-    last_incremental_sync_scheduled_at?: DateTime
-    last_indexed_document_count?: long
-    last_seen?: DateTime
-    last_sync_error?: string
-    last_sync_scheduled_at?: DateTime
-    last_sync_status?: ConnectorSyncStatus
-    last_synced?: DateTime
-    sync_cursor?: any
-  }
-}
-
-export interface ConnectorLastSyncResponse {
-  result: Result
-}
-
-export interface ConnectorListRequest extends RequestBase {
-  from?: integer
-  size?: integer
-  index_name?: Indices
-  connector_name?: Names
-  service_type?: Names
-  query?: string
-}
-
-export interface ConnectorListResponse {
-  count: long
-  results: ConnectorConnector[]
-}
-
-export interface ConnectorPostRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    index_name?: IndexName
-    is_native?: boolean
-    language?: string
-    name?: string
-    service_type?: string
-  }
-}
-
-export interface ConnectorPostResponse {
-  result: Result
-  id: Id
-}
-
-export interface ConnectorPutRequest extends RequestBase {
-  connector_id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    index_name?: IndexName
-    is_native?: boolean
-    language?: string
-    name?: string
-    service_type?: string
-  }
-}
-
-export interface ConnectorPutResponse {
-  result: Result
-  id: Id
-}
-
-export interface ConnectorSyncJobCancelRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export interface ConnectorSyncJobCancelResponse {
-  result: Result
-}
-
-export interface ConnectorSyncJobCheckInRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export interface ConnectorSyncJobCheckInResponse {
-}
-
-export interface ConnectorSyncJobClaimRequest extends RequestBase {
-  connector_sync_job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    sync_cursor?: any
-    worker_hostname: string
-  }
-}
-
-export interface ConnectorSyncJobClaimResponse {
-}
-
-export interface ConnectorSyncJobDeleteRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase
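// Sketch of ConnectorPutRequest above; the connector id, index name, and service
// type are hypothetical values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const created = await client.connector.put({
  connector_id: 'my-connector',
  index_name: 'search-my-index',
  name: 'My connector',
  service_type: 'google_drive'
})
console.log(created.result, created.id)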
-
-export interface ConnectorSyncJobErrorRequest extends RequestBase {
-  connector_sync_job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    error: string
-  }
-}
-
-export interface ConnectorSyncJobErrorResponse {
-}
-
-export interface ConnectorSyncJobGetRequest extends RequestBase {
-  connector_sync_job_id: Id
-}
-
-export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob
-
-export interface ConnectorSyncJobListRequest extends RequestBase {
-  from?: integer
-  size?: integer
-  status?: ConnectorSyncStatus
-  connector_id?: Id
-  job_type?: ConnectorSyncJobType | ConnectorSyncJobType[]
-}
-
-export interface ConnectorSyncJobListResponse {
-  count: long
-  results: ConnectorConnectorSyncJob[]
-}
-
-export interface ConnectorSyncJobPostRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    id: Id
-    job_type?: ConnectorSyncJobType
-    trigger_method?: ConnectorSyncJobTriggerMethod
-  }
-}
-
-export interface ConnectorSyncJobPostResponse {
-  id: Id
-}
-
-export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase {
-  connector_sync_job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    deleted_document_count: long
-    indexed_document_count: long
-    indexed_document_volume: long
-    last_seen?: Duration
-    metadata?: Metadata
-    total_document_count?: integer
-  }
-}
-
-export interface ConnectorSyncJobUpdateStatsResponse {
-}
-
-export interface ConnectorUpdateActiveFilteringRequest extends RequestBase {
-  connector_id: Id
-}
-
-export interface ConnectorUpdateActiveFilteringResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateApiKeyIdRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    api_key_id?: string
-    api_key_secret_id?: string
-  }
-}
-
-export interface ConnectorUpdateApiKeyIdResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateConfigurationRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    configuration?: ConnectorConnectorConfiguration
-    values?: Record
-  }
-}
-
-export interface ConnectorUpdateConfigurationResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateErrorRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    error: SpecUtilsWithNullValue
-  }
-}
-
-export interface ConnectorUpdateErrorResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateFeaturesRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    features: ConnectorConnectorFeatures
-  }
-}
-
-export interface ConnectorUpdateFeaturesResponse {
-  result: Result
-}
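// Sketch of ConnectorSyncJobPostRequest above: `id` names the connector the job
// runs for (hypothetical value here), and `job_type` is one of the union members.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const job = await client.connector.syncJobPost({ id: 'my-connector', job_type: 'full' })
console.log(job.id)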
-
-export interface ConnectorUpdateFilteringRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    filtering?: ConnectorFilteringConfig[]
-    rules?: ConnectorFilteringRule[]
-    advanced_snippet?: ConnectorFilteringAdvancedSnippet
-  }
-}
-
-export interface ConnectorUpdateFilteringResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateFilteringValidationRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    validation: ConnectorFilteringRulesValidation
-  }
-}
-
-export interface ConnectorUpdateFilteringValidationResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateIndexNameRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    index_name: SpecUtilsWithNullValue
-  }
-}
-
-export interface ConnectorUpdateIndexNameResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateNameRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name?: string
-    description?: string
-  }
-}
-
-export interface ConnectorUpdateNameResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateNativeRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    is_native: boolean
-  }
-}
-
-export interface ConnectorUpdateNativeResponse {
-  result: Result
-}
-
-export interface ConnectorUpdatePipelineRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    pipeline: ConnectorIngestPipelineParams
-  }
-}
-
-export interface ConnectorUpdatePipelineResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateSchedulingRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    scheduling: ConnectorSchedulingConfiguration
-  }
-}
-
-export interface ConnectorUpdateSchedulingResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateServiceTypeRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    service_type: string
-  }
-}
-
-export interface ConnectorUpdateServiceTypeResponse {
-  result: Result
-}
-
-export interface ConnectorUpdateStatusRequest extends RequestBase {
-  connector_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    status: ConnectorConnectorStatus
-  }
-}
-
-export interface ConnectorUpdateStatusResponse {
-  result: Result
-}
-
-export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase {
-  index_uuid: Uuid
-  accept_data_loss: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase
-
-export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase {
-  index_uuid: Uuid
-  accept_data_loss: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase
-
-export interface DanglingIndicesListDanglingIndicesDanglingIndex {
-  index_name: string
-  index_uuid: string
-  creation_date_millis: EpochTime<UnitMillis>
-  node_ids: Ids
-}
-
-export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase {
-}
-
-export interface DanglingIndicesListDanglingIndicesResponse {
-  dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[]
-}
-
-export interface EnrichPolicy {
-  enrich_fields: Fields
-  indices: Indices
-  match_field: Field
-  query?: QueryDslQueryContainer
-  name?: Name
-  elasticsearch_version?: string
-}
-
-export type EnrichPolicyType = 'geo_match' | 'match' | 'range'
-
-export interface EnrichSummary {
-  config: Partial<Record<EnrichPolicyType, EnrichPolicy>>
-}
-
-export interface EnrichDeletePolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-}
-
-export type EnrichDeletePolicyResponse = AcknowledgedResponseBase
-
-export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED'
-
-export interface EnrichExecutePolicyExecuteEnrichPolicyStatus {
-  phase: EnrichExecutePolicyEnrichPolicyPhase
-  step?: string
-}
-
-export interface EnrichExecutePolicyRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export interface EnrichExecutePolicyResponse {
-  status?: EnrichExecutePolicyExecuteEnrichPolicyStatus
-  task?: TaskId
-}
-
-export interface EnrichGetPolicyRequest extends RequestBase {
-  name?: Names
-  master_timeout?: Duration
-}
-
-export interface EnrichGetPolicyResponse {
-  policies: EnrichSummary[]
-}
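// Sketch of EnrichExecutePolicyRequest above; the policy name is hypothetical and
// the request blocks until the enrich index is built.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const exec = await client.enrich.executePolicy({ name: 'users-policy', wait_for_completion: true })
console.log(exec.status?.phase)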
*/ - body?: { - geo_match?: EnrichPolicy - match?: EnrichPolicy - range?: EnrichPolicy - } -} - -export type EnrichPutPolicyResponse = AcknowledgedResponseBase - -export interface EnrichStatsCacheStats { - node_id: Id - count: integer - hits: integer - hits_time_in_millis: DurationValue<UnitMillis> - misses: integer - misses_time_in_millis: DurationValue<UnitMillis> - evictions: integer - size_in_bytes: long -} - -export interface EnrichStatsCoordinatorStats { - executed_searches_total: long - node_id: Id - queue_size: integer - remote_requests_current: integer - remote_requests_total: long -} - -export interface EnrichStatsExecutingPolicy { - name: Name - task: TasksTaskInfo -} - -export interface EnrichStatsRequest extends RequestBase { - master_timeout?: Duration -} - -export interface EnrichStatsResponse { - coordinator_stats: EnrichStatsCoordinatorStats[] - executing_policies: EnrichStatsExecutingPolicy[] - cache_stats?: EnrichStatsCacheStats[] -} - -export interface EqlEqlHits<TEvent = unknown> { - total?: SearchTotalHits - events?: EqlHitsEvent<TEvent>[] - sequences?: EqlHitsSequence<TEvent>[] -} - -export interface EqlEqlSearchResponseBase<TEvent = unknown> { - id?: Id - is_partial?: boolean - is_running?: boolean - took?: DurationValue<UnitMillis> - timed_out?: boolean - hits: EqlEqlHits<TEvent> - shard_failures?: ShardFailure[] -} - -export interface EqlHitsEvent<TEvent = unknown> { - _index: IndexName - _id: Id - _source: TEvent - missing?: boolean - fields?: Record<Field, any[]> -} - -export interface EqlHitsSequence<TEvent = unknown> { - events: EqlHitsEvent<TEvent>[] - join_keys?: any[] -} - -export interface EqlDeleteRequest extends RequestBase { - id: Id -} - -export type EqlDeleteResponse = AcknowledgedResponseBase - -export interface EqlGetRequest extends RequestBase { - id: Id - keep_alive?: Duration - wait_for_completion_timeout?: Duration -} - -export type EqlGetResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent> - -export interface EqlGetStatusRequest extends RequestBase { - id: Id -} - -export interface EqlGetStatusResponse { - id: Id - is_partial: boolean - is_running: boolean - start_time_in_millis?: EpochTime<UnitMillis> - expiration_time_in_millis?: EpochTime<UnitMillis> - completion_status?: integer -} - -export interface EqlSearchRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ccs_minimize_roundtrips?: boolean - ignore_unavailable?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
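Putting the enrich types above together, a sketch of creating and executing a `match` policy; the policy, index, and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create a match-type enrich policy (exactly one of geo_match | match | range)
await client.enrich.putPolicy({
  name: 'users-policy',
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name']
  }
})

// Build the policy's enrich index and block until it completes
await client.enrich.executePolicy({ name: 'users-policy', wait_for_completion: true })
```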
*/ - body?: { - query: string - case_sensitive?: boolean - event_category_field?: Field - tiebreaker_field?: Field - timestamp_field?: Field - fetch_size?: uint - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - keep_alive?: Duration - keep_on_completion?: boolean - wait_for_completion_timeout?: Duration - allow_partial_search_results?: boolean - allow_partial_sequence_results?: boolean - size?: uint - fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] - result_position?: EqlSearchResultPosition - runtime_mappings?: MappingRuntimeFields - max_samples_per_key?: integer - } -} - -export type EqlSearchResponse = EqlEqlSearchResponseBase - -export type EqlSearchResultPosition = 'tail' | 'head' - -export interface EsqlAsyncEsqlResult extends EsqlEsqlResult { - id?: string - is_running: boolean -} - -export interface EsqlEsqlClusterDetails { - status: EsqlEsqlClusterStatus - indices: string - took?: DurationValue - _shards?: EsqlEsqlShardInfo -} - -export interface EsqlEsqlClusterInfo { - total: integer - successful: integer - running: integer - skipped: integer - partial: integer - failed: integer - details: Record -} - -export type EsqlEsqlClusterStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' - -export interface EsqlEsqlColumnInfo { - name: string - type: string -} - -export interface EsqlEsqlResult { - took?: DurationValue - is_partial?: boolean - all_columns?: EsqlEsqlColumnInfo[] - columns: EsqlEsqlColumnInfo[] - values: FieldValue[][] - _clusters?: EsqlEsqlClusterInfo - profile?: any -} - -export interface EsqlEsqlShardFailure { - shard: Id - index: IndexName - node?: NodeId - reason: ErrorCause -} - -export interface EsqlEsqlShardInfo { - total: integer - successful?: integer - skipped?: integer - failed?: integer - failures?: EsqlEsqlShardFailure[] -} - -export interface EsqlTableValuesContainer { - integer?: EsqlTableValuesIntegerValue[] - keyword?: EsqlTableValuesKeywordValue[] - long?: EsqlTableValuesLongValue[] - double?: EsqlTableValuesLongDouble[] -} - -export type EsqlTableValuesIntegerValue = integer | integer[] - -export type EsqlTableValuesKeywordValue = string | string[] - -export type EsqlTableValuesLongDouble = double | double[] - -export type EsqlTableValuesLongValue = long | long[] - -export interface EsqlAsyncQueryRequest extends RequestBase { - allow_partial_results?: boolean - delimiter?: string - drop_null_columns?: boolean - format?: EsqlQueryEsqlFormat - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
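`EqlEqlSearchResponseBase` is generic over the event shape, so `eql.search` can be typed end to end. A minimal sketch; the index name, query, and the `LogEvent` shape are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

interface LogEvent {
  '@timestamp': string
  process: { name: string }
}

const response = await client.eql.search<LogEvent>({
  index: 'my-logs',
  query: 'process where process.name == "regsvr32.exe"'
})

// hits.events is EqlHitsEvent<LogEvent>[], so _source is strongly typed
for (const event of response.hits.events ?? []) {
  console.log(event._source.process.name)
}
```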
*/ - body?: { - columnar?: boolean - filter?: QueryDslQueryContainer - locale?: string - params?: FieldValue[] - profile?: boolean - query: string - tables?: Record> - include_ccs_metadata?: boolean - wait_for_completion_timeout?: Duration - keep_alive?: Duration - keep_on_completion?: boolean - } -} - -export type EsqlAsyncQueryResponse = EsqlEsqlResult - -export interface EsqlAsyncQueryDeleteRequest extends RequestBase { - id: Id -} - -export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase - -export interface EsqlAsyncQueryGetRequest extends RequestBase { - id: Id - drop_null_columns?: boolean - format?: EsqlQueryEsqlFormat - keep_alive?: Duration - wait_for_completion_timeout?: Duration -} - -export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult - -export interface EsqlAsyncQueryStopRequest extends RequestBase { - id: Id - drop_null_columns?: boolean -} - -export type EsqlAsyncQueryStopResponse = EsqlEsqlResult - -export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' - -export interface EsqlQueryRequest extends RequestBase { - format?: EsqlQueryEsqlFormat - delimiter?: string - drop_null_columns?: boolean - allow_partial_results?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - columnar?: boolean - filter?: QueryDslQueryContainer - locale?: string - params?: FieldValue[] - profile?: boolean - query: string - tables?: Record> - include_ccs_metadata?: boolean - } -} - -export type EsqlQueryResponse = EsqlEsqlResult - -export interface FeaturesFeature { - name: string - description: string -} - -export interface FeaturesGetFeaturesRequest extends RequestBase { - master_timeout?: Duration -} - -export interface FeaturesGetFeaturesResponse { - features: FeaturesFeature[] -} - -export interface FeaturesResetFeaturesRequest extends RequestBase { - master_timeout?: Duration -} - -export interface FeaturesResetFeaturesResponse { - features: FeaturesFeature[] -} - -export type FleetCheckpoint = long - -export interface FleetGlobalCheckpointsRequest extends RequestBase { - index: IndexName | IndexAlias - wait_for_advance?: boolean - wait_for_index?: boolean - checkpoints?: FleetCheckpoint[] - timeout?: Duration -} - -export interface FleetGlobalCheckpointsResponse { - global_checkpoints: FleetCheckpoint[] - timed_out: boolean -} - -export interface FleetMsearchRequest extends RequestBase { - index?: IndexName | IndexAlias - allow_no_indices?: boolean - ccs_minimize_roundtrips?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - max_concurrent_searches?: long - max_concurrent_shard_requests?: long - pre_filter_shard_size?: long - search_type?: SearchType - rest_total_hits_as_int?: boolean - typed_keys?: boolean - wait_for_checkpoints?: FleetCheckpoint[] - allow_partial_search_results?: boolean - /** @deprecated The use of the 'body' key has been deprecated, use 'searches' instead. 
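A sketch of driving the ES|QL request types above, both synchronously and through the async variants; the query text, timeouts, and index name are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Synchronous: returns EsqlEsqlResult (column metadata plus row-oriented values)
const result = await client.esql.query({ query: 'FROM my-logs | LIMIT 10' })
console.log(result.columns, result.values)

// Async: submit, then poll with the returned id (EsqlAsyncEsqlResult)
const submitted = await client.esql.asyncQuery({
  query: 'FROM my-logs | STATS count = COUNT(*)',
  wait_for_completion_timeout: '2s',
  keep_alive: '5m'
})
if (submitted.is_running && submitted.id != null) {
  await client.esql.asyncQueryGet({ id: submitted.id })
}
```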
*/ - body?: MsearchRequestItem[] -} - -export interface FleetMsearchResponse { - docs: MsearchResponseItem[] -} - -export interface FleetSearchRequest extends RequestBase { - index: IndexName | IndexAlias - allow_no_indices?: boolean - analyzer?: string - analyze_wildcard?: boolean - batched_reduce_size?: long - ccs_minimize_roundtrips?: boolean - default_operator?: QueryDslOperator - df?: string - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - lenient?: boolean - max_concurrent_shard_requests?: long - min_compatible_shard_node?: VersionString - preference?: string - pre_filter_shard_size?: long - request_cache?: boolean - routing?: Routing - scroll?: Duration - search_type?: SearchType - suggest_field?: Field - suggest_mode?: SuggestMode - suggest_size?: long - suggest_text?: string - typed_keys?: boolean - rest_total_hits_as_int?: boolean - _source_excludes?: Fields - _source_includes?: Fields - q?: string - wait_for_checkpoints?: FleetCheckpoint[] - allow_partial_search_results?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aggregations?: Record - /** @alias aggregations */ - aggs?: Record - collapse?: SearchFieldCollapse - explain?: boolean - ext?: Record - from?: integer - highlight?: SearchHighlight - track_total_hits?: SearchTrackHits - indices_boost?: Partial>[] - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - min_score?: double - post_filter?: QueryDslQueryContainer - profile?: boolean - query?: QueryDslQueryContainer - rescore?: SearchRescore | SearchRescore[] - script_fields?: Record - search_after?: SortResults - size?: integer - slice?: SlicedScroll - sort?: Sort - _source?: SearchSourceConfig - fields?: (QueryDslFieldAndFormat | Field)[] - suggest?: SearchSuggester - terminate_after?: long - timeout?: string - track_scores?: boolean - version?: boolean - seq_no_primary_term?: boolean - stored_fields?: Fields - pit?: SearchPointInTimeReference - runtime_mappings?: MappingRuntimeFields - stats?: string[] - } -} - -export interface FleetSearchResponse { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - aggregations?: Record - _clusters?: ClusterStatistics - fields?: Record - max_score?: double - num_reduce_phases?: long - profile?: SearchProfile - pit_id?: Id - _scroll_id?: ScrollId - suggest?: Record[]> - terminated_early?: boolean -} - -export interface GraphConnection { - doc_count: long - source: long - target: long - weight: double -} - -export interface GraphExploreControls { - sample_diversity?: GraphSampleDiversity - sample_size?: integer - timeout?: Duration - use_significance: boolean -} - -export interface GraphHop { - connections?: GraphHop - query: QueryDslQueryContainer - vertices: GraphVertexDefinition[] -} - -export interface GraphSampleDiversity { - field: Field - max_docs_per_value: integer -} - -export interface GraphVertex { - depth: long - field: Field - term: string - weight: double -} - -export interface GraphVertexDefinition { - exclude?: string[] - field: Field - include?: GraphVertexInclude[] - min_doc_count?: long - shard_min_doc_count?: long - size?: integer -} - -export interface GraphVertexInclude { - boost: double - term: string -} - -export interface GraphExploreRequest extends RequestBase { - index: Indices - routing?: Routing - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
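The fleet search types above layer checkpoint-aware waiting on top of the regular search surface. A minimal sketch, assuming a placeholder index and checkpoint value:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Wait until the shard's global checkpoint reaches 1042 before searching
const response = await client.fleet.search({
  index: 'my-index',
  wait_for_checkpoints: [1042],
  query: { match_all: {} }
})
console.log(response.hits.hits.length)
```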
*/ - body?: { - connections?: GraphHop - controls?: GraphExploreControls - query?: QueryDslQueryContainer - vertices?: GraphVertexDefinition[] - } -} - -export interface GraphExploreResponse { - connections: GraphConnection[] - failures: ShardFailure[] - timed_out: boolean - took: long - vertices: GraphVertex[] -} - -export interface IlmActions { - allocate?: IlmAllocateAction - delete?: IlmDeleteAction - downsample?: IlmDownsampleAction - freeze?: EmptyObject - forcemerge?: IlmForceMergeAction - migrate?: IlmMigrateAction - readonly?: EmptyObject - rollover?: IlmRolloverAction - set_priority?: IlmSetPriorityAction - searchable_snapshot?: IlmSearchableSnapshotAction - shrink?: IlmShrinkAction - unfollow?: EmptyObject - wait_for_snapshot?: IlmWaitForSnapshotAction -} - -export interface IlmAllocateAction { - number_of_replicas?: integer - total_shards_per_node?: integer - include?: Record - exclude?: Record - require?: Record -} - -export interface IlmDeleteAction { - delete_searchable_snapshot?: boolean -} - -export interface IlmDownsampleAction { - fixed_interval: DurationLarge - wait_timeout?: Duration -} - -export interface IlmForceMergeAction { - max_num_segments: integer - index_codec?: string -} - -export interface IlmMigrateAction { - enabled?: boolean -} - -export interface IlmPhase { - actions?: IlmActions - min_age?: Duration -} - -export interface IlmPhases { - cold?: IlmPhase - delete?: IlmPhase - frozen?: IlmPhase - hot?: IlmPhase - warm?: IlmPhase -} - -export interface IlmPolicy { - phases: IlmPhases - _meta?: Metadata -} - -export interface IlmRolloverAction { - max_size?: ByteSize - max_primary_shard_size?: ByteSize - max_age?: Duration - max_docs?: long - max_primary_shard_docs?: long - min_size?: ByteSize - min_primary_shard_size?: ByteSize - min_age?: Duration - min_docs?: long - min_primary_shard_docs?: long -} - -export interface IlmSearchableSnapshotAction { - snapshot_repository: string - force_merge_index?: boolean -} - -export interface IlmSetPriorityAction { - priority?: integer -} - -export interface IlmShrinkAction { - number_of_shards?: integer - max_primary_shard_size?: ByteSize - allow_write_after_shrink?: boolean -} - -export interface IlmWaitForSnapshotAction { - policy: string -} - -export interface IlmDeleteLifecycleRequest extends RequestBase { - name: Name - master_timeout?: Duration - timeout?: Duration -} - -export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase - -export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged - -export interface IlmExplainLifecycleLifecycleExplainManaged { - action?: Name - action_time?: DateTime - action_time_millis?: EpochTime - age?: Duration - failed_step?: Name - failed_step_retry_count?: integer - index: IndexName - index_creation_date?: DateTime - index_creation_date_millis?: EpochTime - is_auto_retryable_error?: boolean - lifecycle_date?: DateTime - lifecycle_date_millis?: EpochTime - managed: true - phase?: Name - phase_time?: DateTime - phase_time_millis?: EpochTime - policy?: Name - previous_step_info?: Record - repository_name?: string - snapshot_name?: string - shrink_index_name?: string - step?: Name - step_info?: Record - step_time?: DateTime - step_time_millis?: EpochTime - phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution - time_since_index_creation?: Duration - skip: boolean -} - -export interface IlmExplainLifecycleLifecycleExplainPhaseExecution { - phase_definition?: IlmPhase - policy: Name - 
version: VersionNumber - modified_date_in_millis: EpochTime<UnitMillis> -} - -export interface IlmExplainLifecycleLifecycleExplainUnmanaged { - index: IndexName - managed: false -} - -export interface IlmExplainLifecycleRequest extends RequestBase { - index: IndexName - only_errors?: boolean - only_managed?: boolean - master_timeout?: Duration -} - -export interface IlmExplainLifecycleResponse { - indices: Record<IndexName, IlmExplainLifecycleLifecycleExplain> -} - -export interface IlmGetLifecycleLifecycle { - modified_date: DateTime - policy: IlmPolicy - version: VersionNumber -} - -export interface IlmGetLifecycleRequest extends RequestBase { - name?: Name - master_timeout?: Duration - timeout?: Duration -} - -export type IlmGetLifecycleResponse = Record<string, IlmGetLifecycleLifecycle> - -export interface IlmGetStatusRequest extends RequestBase { -} - -export interface IlmGetStatusResponse { - operation_mode: LifecycleOperationMode -} - -export interface IlmMigrateToDataTiersRequest extends RequestBase { - dry_run?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - legacy_template_to_delete?: string - node_attribute?: string - } -} - -export interface IlmMigrateToDataTiersResponse { - dry_run: boolean - removed_legacy_template: string - migrated_ilm_policies: string[] - migrated_indices: Indices - migrated_legacy_templates: string[] - migrated_composable_templates: string[] - migrated_component_templates: string[] -} - -export interface IlmMoveToStepRequest extends RequestBase { - index: IndexName - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - current_step: IlmMoveToStepStepKey - next_step: IlmMoveToStepStepKey - } -} - -export type IlmMoveToStepResponse = AcknowledgedResponseBase - -export interface IlmMoveToStepStepKey { - action?: string - name?: string - phase: string -} - -export interface IlmPutLifecycleRequest extends RequestBase { - name: Name - master_timeout?: Duration - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
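A sketch of `ilm.putLifecycle` using the `IlmPolicy`/`IlmPhases` shapes defined above; the policy name, sizes, and ages are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.ilm.putLifecycle({
  name: 'my-policy',
  policy: {
    phases: {
      hot: {
        actions: { rollover: { max_primary_shard_size: '50gb', max_age: '30d' } }
      },
      delete: {
        min_age: '90d',
        actions: { delete: {} }
      }
    }
  }
})

// Then inspect how a given index is progressing through the policy
const explain = await client.ilm.explainLifecycle({ index: 'my-index' })
```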
*/ - body?: { - policy?: IlmPolicy - } -} - -export type IlmPutLifecycleResponse = AcknowledgedResponseBase - -export interface IlmRemovePolicyRequest extends RequestBase { - index: IndexName -} - -export interface IlmRemovePolicyResponse { - failed_indexes: IndexName[] - has_failures: boolean -} - -export interface IlmRetryRequest extends RequestBase { - index: IndexName -} - -export type IlmRetryResponse = AcknowledgedResponseBase - -export interface IlmStartRequest extends RequestBase { - master_timeout?: Duration - timeout?: Duration -} - -export type IlmStartResponse = AcknowledgedResponseBase - -export interface IlmStopRequest extends RequestBase { - master_timeout?: Duration - timeout?: Duration -} - -export type IlmStopResponse = AcknowledgedResponseBase - -export interface IndicesAlias { - filter?: QueryDslQueryContainer - index_routing?: Routing - is_hidden?: boolean - is_write_index?: boolean - routing?: Routing - search_routing?: Routing -} - -export interface IndicesAliasDefinition { - filter?: QueryDslQueryContainer - index_routing?: string - is_write_index?: boolean - routing?: string - search_routing?: string - is_hidden?: boolean -} - -export interface IndicesCacheQueries { - enabled: boolean -} - -export interface IndicesDataStream { - _meta?: Metadata - allow_custom_routing?: boolean - failure_store?: IndicesFailureStore - generation: integer - hidden: boolean - ilm_policy?: Name - next_generation_managed_by: IndicesManagedBy - prefer_ilm: boolean - indices: IndicesDataStreamIndex[] - lifecycle?: IndicesDataStreamLifecycleWithRollover - name: DataStreamName - replicated?: boolean - rollover_on_write: boolean - status: HealthStatus - system?: boolean - template: Name - timestamp_field: IndicesDataStreamTimestampField -} - -export interface IndicesDataStreamIndex { - index_name: IndexName - index_uuid: Uuid - ilm_policy?: Name - managed_by?: IndicesManagedBy - prefer_ilm?: boolean -} - -export interface IndicesDataStreamLifecycle { - data_retention?: Duration - downsampling?: IndicesDataStreamLifecycleDownsampling - enabled?: boolean -} - -export interface IndicesDataStreamLifecycleDownsampling { - rounds: IndicesDownsamplingRound[] -} - -export interface IndicesDataStreamLifecycleRolloverConditions { - min_age?: Duration - max_age?: string - min_docs?: long - max_docs?: long - min_size?: ByteSize - max_size?: ByteSize - min_primary_shard_size?: ByteSize - max_primary_shard_size?: ByteSize - min_primary_shard_docs?: long - max_primary_shard_docs?: long -} - -export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { - rollover?: IndicesDataStreamLifecycleRolloverConditions -} - -export interface IndicesDataStreamTimestampField { - name: Field -} - -export interface IndicesDataStreamVisibility { - hidden?: boolean - allow_custom_routing?: boolean -} - -export interface IndicesDownsampleConfig { - fixed_interval: DurationLarge -} - -export interface IndicesDownsamplingRound { - after: Duration - config: IndicesDownsampleConfig -} - -export interface IndicesFailureStore { - enabled: boolean - indices: IndicesDataStreamIndex[] - rollover_on_write: boolean -} - -export interface IndicesFielddataFrequencyFilter { - max: double - min: double - min_segment_size: integer -} - -export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum' - -export interface IndicesIndexRouting { - allocation?: IndicesIndexRoutingAllocation - rebalance?: IndicesIndexRoutingRebalance -} - -export interface IndicesIndexRoutingAllocation { - enable?: 
IndicesIndexRoutingAllocationOptions - include?: IndicesIndexRoutingAllocationInclude - initial_recovery?: IndicesIndexRoutingAllocationInitialRecovery - disk?: IndicesIndexRoutingAllocationDisk -} - -export interface IndicesIndexRoutingAllocationDisk { - threshold_enabled?: boolean | string -} - -export interface IndicesIndexRoutingAllocationInclude { - _tier_preference?: string - _id?: Id -} - -export interface IndicesIndexRoutingAllocationInitialRecovery { - _id?: Id -} - -export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none' - -export interface IndicesIndexRoutingRebalance { - enable: IndicesIndexRoutingRebalanceOptions -} - -export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' - -export interface IndicesIndexSegmentSort { - field?: Fields - order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[] - mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[] - missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[] -} - -export interface IndicesIndexSettingBlocks { - read_only?: SpecUtilsStringified - read_only_allow_delete?: SpecUtilsStringified - read?: SpecUtilsStringified - write?: SpecUtilsStringified - metadata?: SpecUtilsStringified -} - -export interface IndicesIndexSettingsKeys { - index?: IndicesIndexSettings - mode?: string - routing_path?: string | string[] - soft_deletes?: IndicesSoftDeletes - sort?: IndicesIndexSegmentSort - number_of_shards?: integer | string - number_of_replicas?: integer | string - number_of_routing_shards?: integer - check_on_startup?: IndicesIndexCheckOnStartup - codec?: string - routing_partition_size?: SpecUtilsStringified - load_fixed_bitset_filters_eagerly?: boolean - hidden?: boolean | string - auto_expand_replicas?: SpecUtilsWithNullValue - merge?: IndicesMerge - search?: IndicesSettingsSearch - refresh_interval?: Duration - max_result_window?: integer - max_inner_result_window?: integer - max_rescore_window?: integer - max_docvalue_fields_search?: integer - max_script_fields?: integer - max_ngram_diff?: integer - max_shingle_diff?: integer - blocks?: IndicesIndexSettingBlocks - max_refresh_listeners?: integer - analyze?: IndicesSettingsAnalyze - highlight?: IndicesSettingsHighlight - max_terms_count?: integer - max_regex_length?: integer - routing?: IndicesIndexRouting - gc_deletes?: Duration - default_pipeline?: PipelineName - final_pipeline?: PipelineName - lifecycle?: IndicesIndexSettingsLifecycle - provided_name?: Name - creation_date?: SpecUtilsStringified> - creation_date_string?: DateTime - uuid?: Uuid - version?: IndicesIndexVersioning - verified_before_close?: boolean | string - format?: string | integer - max_slices_per_scroll?: integer - translog?: IndicesTranslog - query_string?: IndicesSettingsQueryString - priority?: integer | string - top_metrics_max_size?: integer - analysis?: IndicesIndexSettingsAnalysis - settings?: IndicesIndexSettings - time_series?: IndicesIndexSettingsTimeSeries - queries?: IndicesQueries - similarity?: Record - mapping?: IndicesMappingLimitSettings - 'indexing.slowlog'?: IndicesIndexingSlowlogSettings - indexing_pressure?: IndicesIndexingPressure - store?: IndicesStorage -} -export type IndicesIndexSettings = IndicesIndexSettingsKeys -& { [property: string]: any } - -export interface IndicesIndexSettingsAnalysis { - analyzer?: Record - char_filter?: Record - filter?: Record - normalizer?: Record - tokenizer?: Record -} - -export interface IndicesIndexSettingsLifecycle { - name?: Name - indexing_complete?: 
SpecUtilsStringified - origination_date?: long - parse_origination_date?: boolean - step?: IndicesIndexSettingsLifecycleStep - rollover_alias?: string - prefer_ilm?: boolean | string -} - -export interface IndicesIndexSettingsLifecycleStep { - wait_time_threshold?: Duration -} - -export interface IndicesIndexSettingsTimeSeries { - end_time?: DateTime - start_time?: DateTime -} - -export interface IndicesIndexState { - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings - defaults?: IndicesIndexSettings - data_stream?: DataStreamName - lifecycle?: IndicesDataStreamLifecycle -} - -export interface IndicesIndexTemplate { - index_patterns: Names - composed_of: Name[] - template?: IndicesIndexTemplateSummary - version?: VersionNumber - priority?: long - _meta?: Metadata - allow_auto_create?: boolean - data_stream?: IndicesIndexTemplateDataStreamConfiguration - deprecated?: boolean - ignore_missing_component_templates?: Names -} - -export interface IndicesIndexTemplateDataStreamConfiguration { - hidden?: boolean - allow_custom_routing?: boolean -} - -export interface IndicesIndexTemplateSummary { - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings - lifecycle?: IndicesDataStreamLifecycleWithRollover -} - -export interface IndicesIndexVersioning { - created?: VersionString - created_string?: string -} - -export interface IndicesIndexingPressure { - memory: IndicesIndexingPressureMemory -} - -export interface IndicesIndexingPressureMemory { - limit?: integer -} - -export interface IndicesIndexingSlowlogSettings { - level?: string - source?: integer - reformat?: boolean - threshold?: IndicesIndexingSlowlogTresholds -} - -export interface IndicesIndexingSlowlogTresholds { - index?: IndicesSlowlogTresholdLevels -} - -export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' - -export interface IndicesMappingLimitSettings { - coerce?: boolean - total_fields?: IndicesMappingLimitSettingsTotalFields - depth?: IndicesMappingLimitSettingsDepth - nested_fields?: IndicesMappingLimitSettingsNestedFields - nested_objects?: IndicesMappingLimitSettingsNestedObjects - field_name_length?: IndicesMappingLimitSettingsFieldNameLength - dimension_fields?: IndicesMappingLimitSettingsDimensionFields - source?: IndicesMappingLimitSettingsSourceFields - ignore_malformed?: boolean | string -} - -export interface IndicesMappingLimitSettingsDepth { - limit?: long -} - -export interface IndicesMappingLimitSettingsDimensionFields { - limit?: long -} - -export interface IndicesMappingLimitSettingsFieldNameLength { - limit?: long -} - -export interface IndicesMappingLimitSettingsNestedFields { - limit?: long -} - -export interface IndicesMappingLimitSettingsNestedObjects { - limit?: long -} - -export interface IndicesMappingLimitSettingsSourceFields { - mode: IndicesSourceMode -} - -export interface IndicesMappingLimitSettingsTotalFields { - limit?: long | string - ignore_dynamic_beyond_limit?: boolean | string -} - -export interface IndicesMerge { - scheduler?: IndicesMergeScheduler -} - -export interface IndicesMergeScheduler { - max_thread_count?: SpecUtilsStringified - max_merge_count?: SpecUtilsStringified -} - -export interface IndicesNumericFielddata { - format: IndicesNumericFielddataFormat -} - -export type IndicesNumericFielddataFormat = 'array' | 'disabled' - -export interface IndicesQueries { - cache?: IndicesCacheQueries -} - -export interface IndicesRetentionLease { - period: Duration -} - -export interface 
IndicesSearchIdle { - after?: Duration -} - -export type IndicesSegmentSortMissing = '_last' | '_first' - -export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX' - -export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' - -export interface IndicesSettingsAnalyze { - max_token_count?: SpecUtilsStringified -} - -export interface IndicesSettingsHighlight { - max_analyzed_offset?: integer -} - -export interface IndicesSettingsQueryString { - lenient: SpecUtilsStringified -} - -export interface IndicesSettingsSearch { - idle?: IndicesSearchIdle - slowlog?: IndicesSlowlogSettings -} - -export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted - -export interface IndicesSettingsSimilarityBm25 { - type: 'BM25' - b?: double - discount_overlaps?: boolean - k1?: double -} - -export interface IndicesSettingsSimilarityBoolean { - type: 'boolean' -} - -export interface IndicesSettingsSimilarityDfi { - type: 'DFI' - independence_measure: DFIIndependenceMeasure -} - -export interface IndicesSettingsSimilarityDfr { - type: 'DFR' - after_effect: DFRAfterEffect - basic_model: DFRBasicModel - normalization: Normalization -} - -export interface IndicesSettingsSimilarityIb { - type: 'IB' - distribution: IBDistribution - lambda: IBLambda - normalization: Normalization -} - -export interface IndicesSettingsSimilarityLmd { - type: 'LMDirichlet' - mu?: double -} - -export interface IndicesSettingsSimilarityLmj { - type: 'LMJelinekMercer' - lambda?: double -} - -export interface IndicesSettingsSimilarityScripted { - type: 'scripted' - script: Script | string - weight_script?: Script | string -} - -export interface IndicesSlowlogSettings { - level?: string - source?: integer - reformat?: boolean - threshold?: IndicesSlowlogTresholds -} - -export interface IndicesSlowlogTresholdLevels { - warn?: Duration - info?: Duration - debug?: Duration - trace?: Duration -} - -export interface IndicesSlowlogTresholds { - query?: IndicesSlowlogTresholdLevels - fetch?: IndicesSlowlogTresholdLevels -} - -export interface IndicesSoftDeletes { - enabled?: boolean - retention_lease?: IndicesRetentionLease -} - -export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' - -export interface IndicesStorage { - type: IndicesStorageType - allow_mmap?: boolean - stats_refresh_interval?: Duration -} - -export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string - -export interface IndicesTemplateMapping { - aliases: Record - index_patterns: Name[] - mappings: MappingTypeMapping - order: integer - settings: Record - version?: VersionNumber -} - -export interface IndicesTranslog { - sync_interval?: Duration - durability?: IndicesTranslogDurability - flush_threshold_size?: ByteSize - retention?: IndicesTranslogRetention -} - -export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC' - -export interface IndicesTranslogRetention { - size?: ByteSize - age?: Duration -} - -export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' - -export interface IndicesAddBlockIndicesBlockStatus { - name: IndexName - blocked: boolean -} - -export interface IndicesAddBlockRequest extends RequestBase { - index: IndexName - block: IndicesAddBlockIndicesBlockOptions - allow_no_indices?: boolean - expand_wildcards?: 
ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Duration - timeout?: Duration -} - -export interface IndicesAddBlockResponse { - acknowledged: boolean - shards_acknowledged: boolean - indices: IndicesAddBlockIndicesBlockStatus[] -} - -export interface IndicesAnalyzeAnalyzeDetail { - analyzer?: IndicesAnalyzeAnalyzerDetail - charfilters?: IndicesAnalyzeCharFilterDetail[] - custom_analyzer: boolean - tokenfilters?: IndicesAnalyzeTokenDetail[] - tokenizer?: IndicesAnalyzeTokenDetail -} - -export interface IndicesAnalyzeAnalyzeToken { - end_offset: long - position: long - positionLength?: long - start_offset: long - token: string - type: string -} - -export interface IndicesAnalyzeAnalyzerDetail { - name: string - tokens: IndicesAnalyzeExplainAnalyzeToken[] -} - -export interface IndicesAnalyzeCharFilterDetail { - filtered_text: string[] - name: string -} - -export interface IndicesAnalyzeExplainAnalyzeTokenKeys { - bytes: string - end_offset: long - keyword?: boolean - position: long - positionLength: long - start_offset: long - termFrequency: long - token: string - type: string -} -export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys -& { [property: string]: any } - -export interface IndicesAnalyzeRequest extends RequestBase { - index?: IndexName - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - analyzer?: string - attributes?: string[] - char_filter?: AnalysisCharFilter[] - explain?: boolean - field?: Field - filter?: AnalysisTokenFilter[] - normalizer?: string - text?: IndicesAnalyzeTextToAnalyze - tokenizer?: AnalysisTokenizer - } -} - -export interface IndicesAnalyzeResponse { - detail?: IndicesAnalyzeAnalyzeDetail - tokens?: IndicesAnalyzeAnalyzeToken[] -} - -export type IndicesAnalyzeTextToAnalyze = string | string[] - -export interface IndicesAnalyzeTokenDetail { - name: string - tokens: IndicesAnalyzeExplainAnalyzeToken[] -} - -export interface IndicesCancelMigrateReindexRequest extends RequestBase { - index: Indices -} - -export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase - -export interface IndicesClearCacheRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - fielddata?: boolean - fields?: Fields - ignore_unavailable?: boolean - query?: boolean - request?: boolean -} - -export type IndicesClearCacheResponse = ShardsOperationResponseBase - -export interface IndicesCloneRequest extends RequestBase { - index: IndexName - target: Name - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
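`IndicesAnalyzeRequest` above accepts either a named analyzer or an ad-hoc tokenizer/filter chain. A minimal sketch with a built-in analyzer and placeholder text:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const { tokens } = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The QUICK brown foxes.'
})
console.log(tokens?.map(t => t.token)) // e.g. ['the', 'quick', 'brown', 'foxes']
```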
*/ - body?: { - aliases?: Record - settings?: Record - } -} - -export interface IndicesCloneResponse { - acknowledged: boolean - index: IndexName - shards_acknowledged: boolean -} - -export interface IndicesCloseCloseIndexResult { - closed: boolean - shards?: Record -} - -export interface IndicesCloseCloseShardResult { - failures: ShardFailure[] -} - -export interface IndicesCloseRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards -} - -export interface IndicesCloseResponse { - acknowledged: boolean - indices: Record - shards_acknowledged: boolean -} - -export interface IndicesCreateRequest extends RequestBase { - index: IndexName - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings - } -} - -export interface IndicesCreateResponse { - index: IndexName - shards_acknowledged: boolean - acknowledged: boolean -} - -export interface IndicesCreateDataStreamRequest extends RequestBase { - name: DataStreamName - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase - -export interface IndicesCreateFromCreateFrom { - mappings_override?: MappingTypeMapping - settings_override?: IndicesIndexSettings - remove_index_blocks?: boolean -} - -export interface IndicesCreateFromRequest extends RequestBase { - source: IndexName - dest: IndexName - /** @deprecated The use of the 'body' key has been deprecated, use 'create_from' instead. 
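A sketch of `indices.create` with the deprecated `body` keys hoisted to the top level; the index name, mapping, and settings are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.create({
  index: 'my-index',
  settings: { number_of_shards: 1, number_of_replicas: 1 },
  mappings: {
    properties: {
      title: { type: 'text' },
      created_at: { type: 'date' }
    }
  },
  aliases: { 'my-alias': { is_write_index: true } }
})
```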
*/ - body?: IndicesCreateFromCreateFrom -} - -export interface IndicesCreateFromResponse { - acknowledged: boolean - index: IndexName - shards_acknowledged: boolean -} - -export interface IndicesDataStreamsStatsDataStreamsStatsItem { - backing_indices: integer - data_stream: Name - maximum_timestamp: EpochTime - store_size?: ByteSize - store_size_bytes: long -} - -export interface IndicesDataStreamsStatsRequest extends RequestBase { - name?: IndexName - expand_wildcards?: ExpandWildcards -} - -export interface IndicesDataStreamsStatsResponse { - _shards: ShardStatistics - backing_indices: integer - data_stream_count: integer - data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] - total_store_sizes?: ByteSize - total_store_size_bytes: long -} - -export interface IndicesDeleteRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesDeleteResponse = IndicesResponseBase - -export interface IndicesDeleteAliasIndicesAliasesResponseBody extends AcknowledgedResponseBase { - errors?: boolean -} - -export interface IndicesDeleteAliasRequest extends RequestBase { - index: Indices - name: Names - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody - -export interface IndicesDeleteDataLifecycleRequest extends RequestBase { - name: DataStreamNames - expand_wildcards?: ExpandWildcards - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase - -export interface IndicesDeleteDataStreamRequest extends RequestBase { - name: DataStreamNames - master_timeout?: Duration - expand_wildcards?: ExpandWildcards -} - -export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase - -export interface IndicesDeleteIndexTemplateRequest extends RequestBase { - name: Names - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase - -export interface IndicesDeleteTemplateRequest extends RequestBase { - name: Name - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase - -export interface IndicesDiskUsageRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flush?: boolean - ignore_unavailable?: boolean - run_expensive_tasks?: boolean -} - -export type IndicesDiskUsageResponse = any - -export interface IndicesDownsampleRequest extends RequestBase { - index: IndexName - target_index: IndexName - /** @deprecated The use of the 'body' key has been deprecated, use 'config' instead. 
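Per the deprecation note above, the downsample body moves to a `config` key. A sketch with placeholder index names and interval; the source must be a read-only time-series index:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.downsample({
  index: 'my-tsdb-index',
  target_index: 'my-tsdb-index-1h',
  config: { fixed_interval: '1h' } // IndicesDownsampleConfig
})
```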
*/ - body?: IndicesDownsampleConfig -} - -export type IndicesDownsampleResponse = any - -export interface IndicesExistsRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - include_defaults?: boolean - local?: boolean -} - -export type IndicesExistsResponse = boolean - -export interface IndicesExistsAliasRequest extends RequestBase { - name: Names - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean -} - -export type IndicesExistsAliasResponse = boolean - -export interface IndicesExistsIndexTemplateRequest extends RequestBase { - name: Name - local?: boolean - flat_settings?: boolean - master_timeout?: Duration -} - -export type IndicesExistsIndexTemplateResponse = boolean - -export interface IndicesExistsTemplateRequest extends RequestBase { - name: Names - flat_settings?: boolean - local?: boolean - master_timeout?: Duration -} - -export type IndicesExistsTemplateResponse = boolean - -export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { - index: IndexName - managed_by_lifecycle: boolean - index_creation_date_millis?: EpochTime - time_since_index_creation?: Duration - rollover_date_millis?: EpochTime - time_since_rollover?: Duration - lifecycle?: IndicesDataStreamLifecycleWithRollover - generation_time?: Duration - error?: string -} - -export interface IndicesExplainDataLifecycleRequest extends RequestBase { - index: Indices - include_defaults?: boolean - master_timeout?: Duration -} - -export interface IndicesExplainDataLifecycleResponse { - indices: Record -} - -export interface IndicesFieldUsageStatsFieldSummary { - any: uint - stored_fields: uint - doc_values: uint - points: uint - norms: uint - term_vectors: uint - knn_vectors: uint - inverted_index: IndicesFieldUsageStatsInvertedIndex -} - -export interface IndicesFieldUsageStatsFieldsUsageBodyKeys { - _shards: ShardStatistics -} -export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys -& { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics } - -export interface IndicesFieldUsageStatsInvertedIndex { - terms: uint - postings: uint - proximity: uint - positions: uint - term_frequencies: uint - offsets: uint - payloads: uint -} - -export interface IndicesFieldUsageStatsRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - fields?: Fields -} - -export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody - -export interface IndicesFieldUsageStatsShardsStats { - all_fields: IndicesFieldUsageStatsFieldSummary - fields: Record -} - -export interface IndicesFieldUsageStatsUsageStatsIndex { - shards: IndicesFieldUsageStatsUsageStatsShards[] -} - -export interface IndicesFieldUsageStatsUsageStatsShards { - routing: IndicesStatsShardRouting - stats: IndicesFieldUsageStatsShardsStats - tracking_id: string - tracking_started_at_millis: EpochTime -} - -export interface IndicesFlushRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - force?: boolean - ignore_unavailable?: boolean - wait_if_ongoing?: boolean -} - -export type IndicesFlushResponse = ShardsOperationResponseBase - -export interface IndicesForcemergeRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - 
expand_wildcards?: ExpandWildcards - flush?: boolean - ignore_unavailable?: boolean - max_num_segments?: long - only_expunge_deletes?: boolean - wait_for_completion?: boolean -} - -export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody - -export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { - task?: string -} - -export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' - -export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] - -export interface IndicesGetRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - include_defaults?: boolean - local?: boolean - master_timeout?: Duration - features?: IndicesGetFeatures -} - -export type IndicesGetResponse = Record - -export interface IndicesGetAliasRequest extends RequestBase { - name?: Names - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean -} - -export type IndicesGetAliasResponse = Record - -export interface IndicesGetAliasIndexAliases { - aliases: Record -} - -export interface IndicesGetAliasNotFoundAliasesKeys { - error: string - status: number -} -export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys -& { [property: string]: IndicesGetAliasIndexAliases | string | number } - -export interface IndicesGetDataLifecycleDataStreamWithLifecycle { - name: DataStreamName - lifecycle?: IndicesDataStreamLifecycleWithRollover -} - -export interface IndicesGetDataLifecycleRequest extends RequestBase { - name: DataStreamNames - expand_wildcards?: ExpandWildcards - include_defaults?: boolean - master_timeout?: Duration -} - -export interface IndicesGetDataLifecycleResponse { - data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] -} - -export interface IndicesGetDataLifecycleStatsDataStreamStats { - backing_indices_in_error: integer - backing_indices_in_total: integer - name: DataStreamName -} - -export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { -} - -export interface IndicesGetDataLifecycleStatsResponse { - data_stream_count: integer - data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] - last_run_duration_in_millis?: DurationValue - time_between_starts_in_millis?: DurationValue -} - -export interface IndicesGetDataStreamRequest extends RequestBase { - name?: DataStreamNames - expand_wildcards?: ExpandWildcards - include_defaults?: boolean - master_timeout?: Duration - verbose?: boolean -} - -export interface IndicesGetDataStreamResponse { - data_streams: IndicesDataStream[] -} - -export interface IndicesGetFieldMappingRequest extends RequestBase { - fields: Fields - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - include_defaults?: boolean - local?: boolean -} - -export type IndicesGetFieldMappingResponse = Record - -export interface IndicesGetFieldMappingTypeFieldMappings { - mappings: Record -} - -export interface IndicesGetIndexTemplateIndexTemplateItem { - name: Name - index_template: IndicesIndexTemplate -} - -export interface IndicesGetIndexTemplateRequest extends RequestBase { - name?: Name - local?: boolean - flat_settings?: boolean - master_timeout?: Duration - include_defaults?: boolean -} - -export interface IndicesGetIndexTemplateResponse { - index_templates: IndicesGetIndexTemplateIndexTemplateItem[] -} - -export 
interface IndicesGetMappingIndexMappingRecord { - item?: MappingTypeMapping - mappings: MappingTypeMapping -} - -export interface IndicesGetMappingRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - local?: boolean - master_timeout?: Duration -} - -export type IndicesGetMappingResponse = Record - -export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { - index: Indices -} - -export interface IndicesGetMigrateReindexStatusResponse { - start_time?: DateTime - start_time_millis: EpochTime - complete: boolean - total_indices_in_data_stream: integer - total_indices_requiring_upgrade: integer - successes: integer - in_progress: IndicesGetMigrateReindexStatusStatusInProgress[] - pending: integer - errors: IndicesGetMigrateReindexStatusStatusError[] - exception?: string -} - -export interface IndicesGetMigrateReindexStatusStatusError { - index: string - message: string -} - -export interface IndicesGetMigrateReindexStatusStatusInProgress { - index: string - total_doc_count: long - reindexed_doc_count: long -} - -export interface IndicesGetSettingsRequest extends RequestBase { - index?: Indices - name?: Names - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - include_defaults?: boolean - local?: boolean - master_timeout?: Duration -} - -export type IndicesGetSettingsResponse = Record - -export interface IndicesGetTemplateRequest extends RequestBase { - name?: Names - flat_settings?: boolean - local?: boolean - master_timeout?: Duration -} - -export type IndicesGetTemplateResponse = Record - -export interface IndicesMigrateReindexMigrateReindex { - mode: IndicesMigrateReindexModeEnum - source: IndicesMigrateReindexSourceIndex -} - -export type IndicesMigrateReindexModeEnum = 'upgrade' - -export interface IndicesMigrateReindexRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, use 'reindex' instead. */ - body?: IndicesMigrateReindexMigrateReindex -} - -export type IndicesMigrateReindexResponse = AcknowledgedResponseBase - -export interface IndicesMigrateReindexSourceIndex { - index: IndexName -} - -export interface IndicesMigrateToDataStreamRequest extends RequestBase { - name: IndexName - master_timeout?: Duration - timeout?: Duration -} - -export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase - -export interface IndicesModifyDataStreamAction { - add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction - remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction -} - -export interface IndicesModifyDataStreamIndexAndDataStreamAction { - data_stream: DataStreamName - index: IndexName -} - -export interface IndicesModifyDataStreamRequest extends RequestBase { - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
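A sketch of `indices.modifyDataStream`, whose `actions` likewise move to the top level; the stream name is a placeholder and the backing-index name is hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.modifyDataStream({
  actions: [
    {
      remove_backing_index: {
        data_stream: 'my-stream',
        index: '.ds-my-stream-2025.01.01-000001' // hypothetical backing index
      }
    }
  ]
})
```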
*/ - body?: { - actions: IndicesModifyDataStreamAction[] - } -} - -export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase - -export interface IndicesOpenRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards -} - -export interface IndicesOpenResponse { - acknowledged: boolean - shards_acknowledged: boolean -} - -export interface IndicesPromoteDataStreamRequest extends RequestBase { - name: IndexName - master_timeout?: Duration -} - -export type IndicesPromoteDataStreamResponse = any - -export interface IndicesPutAliasRequest extends RequestBase { - index: Indices - name: Name - master_timeout?: Duration - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - filter?: QueryDslQueryContainer - index_routing?: Routing - is_write_index?: boolean - routing?: Routing - search_routing?: Routing - } -} - -export type IndicesPutAliasResponse = AcknowledgedResponseBase - -export interface IndicesPutDataLifecycleRequest extends RequestBase { - name: DataStreamNames - expand_wildcards?: ExpandWildcards - master_timeout?: Duration - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - data_retention?: Duration - downsampling?: IndicesDataStreamLifecycleDownsampling - enabled?: boolean - } -} - -export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase - -export interface IndicesPutIndexTemplateIndexTemplateMapping { - aliases?: Record - mappings?: MappingTypeMapping - settings?: IndicesIndexSettings - lifecycle?: IndicesDataStreamLifecycle -} - -export interface IndicesPutIndexTemplateRequest extends RequestBase { - name: Name - create?: boolean - master_timeout?: Duration - cause?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - index_patterns?: Indices - composed_of?: Name[] - template?: IndicesPutIndexTemplateIndexTemplateMapping - data_stream?: IndicesDataStreamVisibility - priority?: long - version?: VersionNumber - _meta?: Metadata - allow_auto_create?: boolean - ignore_missing_component_templates?: string[] - deprecated?: boolean - } -} - -export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase - -export interface IndicesPutMappingRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - master_timeout?: Duration - timeout?: Duration - write_index_only?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
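A sketch of `indices.putIndexTemplate` for a data stream, built from the template shapes above; the template name, pattern, and retention are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.indices.putIndexTemplate({
  name: 'my-logs-template',
  index_patterns: ['my-logs-*'],
  data_stream: {}, // IndicesDataStreamVisibility; enables data stream creation
  priority: 200,
  template: {
    settings: { number_of_shards: 1 },
    mappings: { properties: { message: { type: 'text' } } },
    lifecycle: { data_retention: '7d' }
  }
})
```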
*/ - body?: { - date_detection?: boolean - dynamic?: MappingDynamicMapping - dynamic_date_formats?: string[] - dynamic_templates?: Partial>[] - _field_names?: MappingFieldNamesField - _meta?: Metadata - numeric_detection?: boolean - properties?: Record - _routing?: MappingRoutingField - _source?: MappingSourceField - runtime?: MappingRuntimeFields - } -} - -export type IndicesPutMappingResponse = IndicesResponseBase - -export interface IndicesPutSettingsRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - flat_settings?: boolean - ignore_unavailable?: boolean - master_timeout?: Duration - preserve_existing?: boolean - reopen?: boolean - timeout?: Duration - /** @deprecated The use of the 'body' key has been deprecated, use 'settings' instead. */ - body?: IndicesIndexSettings -} - -export type IndicesPutSettingsResponse = AcknowledgedResponseBase - -export interface IndicesPutTemplateRequest extends RequestBase { - name: Name - create?: boolean - master_timeout?: Duration - cause?: string - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ - body?: { - aliases?: Record - index_patterns?: string | string[] - mappings?: MappingTypeMapping - order?: integer - settings?: IndicesIndexSettings - version?: VersionNumber - } -} - -export type IndicesPutTemplateResponse = AcknowledgedResponseBase - -export interface IndicesRecoveryFileDetails { - length: long - name: string - recovered: long -} - -export interface IndicesRecoveryRecoveryBytes { - percent: Percentage - recovered?: ByteSize - recovered_in_bytes: ByteSize - recovered_from_snapshot?: ByteSize - recovered_from_snapshot_in_bytes?: ByteSize - reused?: ByteSize - reused_in_bytes: ByteSize - total?: ByteSize - total_in_bytes: ByteSize -} - -export interface IndicesRecoveryRecoveryFiles { - details?: IndicesRecoveryFileDetails[] - percent: Percentage - recovered: long - reused: long - total: long -} - -export interface IndicesRecoveryRecoveryIndexStatus { - bytes?: IndicesRecoveryRecoveryBytes - files: IndicesRecoveryRecoveryFiles - size: IndicesRecoveryRecoveryBytes - source_throttle_time?: Duration - source_throttle_time_in_millis: DurationValue - target_throttle_time?: Duration - target_throttle_time_in_millis: DurationValue - total_time?: Duration - total_time_in_millis: DurationValue -} - -export interface IndicesRecoveryRecoveryOrigin { - hostname?: string - host?: Host - transport_address?: TransportAddress - id?: Id - ip?: Ip - name?: Name - bootstrap_new_history_uuid?: boolean - repository?: Name - snapshot?: Name - version?: VersionString - restoreUUID?: Uuid - index?: IndexName -} - -export interface IndicesRecoveryRecoveryStartStatus { - check_index_time?: Duration - check_index_time_in_millis: DurationValue - total_time?: Duration - total_time_in_millis: DurationValue -} - -export interface IndicesRecoveryRecoveryStatus { - shards: IndicesRecoveryShardRecovery[] -} - -export interface IndicesRecoveryRequest extends RequestBase { - index?: Indices - active_only?: boolean - detailed?: boolean - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean -} - -export type IndicesRecoveryResponse = Record - -export interface IndicesRecoveryShardRecovery { - id: long - index: IndicesRecoveryRecoveryIndexStatus - primary: boolean - source: IndicesRecoveryRecoveryOrigin - stage: string - start?: IndicesRecoveryRecoveryStartStatus - start_time?: DateTime - start_time_in_millis: 
EpochTime - stop_time?: DateTime - stop_time_in_millis?: EpochTime - target: IndicesRecoveryRecoveryOrigin - total_time?: Duration - total_time_in_millis: DurationValue - translog: IndicesRecoveryTranslogStatus - type: string - verify_index: IndicesRecoveryVerifyIndex -} - -export interface IndicesRecoveryTranslogStatus { - percent: Percentage - recovered: long - total: long - total_on_start: long - total_time?: Duration - total_time_in_millis: DurationValue -} - -export interface IndicesRecoveryVerifyIndex { - check_index_time?: Duration - check_index_time_in_millis: DurationValue - total_time?: Duration - total_time_in_millis: DurationValue -} - -export interface IndicesRefreshRequest extends RequestBase { - index?: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean -} - -export type IndicesRefreshResponse = ShardsOperationResponseBase - -export interface IndicesReloadSearchAnalyzersReloadDetails { - index: string - reloaded_analyzers: string[] - reloaded_node_ids: string[] -} - -export interface IndicesReloadSearchAnalyzersReloadResult { - reload_details: IndicesReloadSearchAnalyzersReloadDetails[] - _shards: ShardStatistics -} - -export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { - index: Indices - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - resource?: string -} - -export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult - -export interface IndicesResolveClusterRequest extends RequestBase { - name?: Names - allow_no_indices?: boolean - expand_wildcards?: ExpandWildcards - ignore_throttled?: boolean - ignore_unavailable?: boolean - timeout?: Duration -} - -export interface IndicesResolveClusterResolveClusterInfo { - connected: boolean - skip_unavailable: boolean - matching_indices?: boolean - error?: string - version?: ElasticsearchVersionMinInfo -} - -export type IndicesResolveClusterResponse = Record - -export interface IndicesResolveIndexRequest extends RequestBase { - name: Names - expand_wildcards?: ExpandWildcards - ignore_unavailable?: boolean - allow_no_indices?: boolean -} - -export interface IndicesResolveIndexResolveIndexAliasItem { - name: Name - indices: Indices -} - -export interface IndicesResolveIndexResolveIndexDataStreamsItem { - name: DataStreamName - timestamp_field: Field - backing_indices: Indices -} - -export interface IndicesResolveIndexResolveIndexItem { - name: Name - aliases?: string[] - attributes: string[] - data_stream?: DataStreamName -} - -export interface IndicesResolveIndexResponse { - indices: IndicesResolveIndexResolveIndexItem[] - aliases: IndicesResolveIndexResolveIndexAliasItem[] - data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] -} - -export interface IndicesRolloverRequest extends RequestBase { - alias: IndexAlias - new_index?: IndexName - dry_run?: boolean - master_timeout?: Duration - timeout?: Duration - wait_for_active_shards?: WaitForActiveShards - lazy?: boolean - /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
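A sketch of `indices.rollover` with `conditions` at the top level, matching the `IndicesRolloverRolloverConditions` shape just below; the alias and thresholds are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const response = await client.indices.rollover({
  alias: 'my-write-alias',
  conditions: { max_age: '7d', max_primary_shard_size: '50gb' }
})
console.log(response.rolled_over, response.new_index)
```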
-export interface IndicesRolloverRequest extends RequestBase {
-  alias: IndexAlias
-  new_index?: IndexName
-  dry_run?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  lazy?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    conditions?: IndicesRolloverRolloverConditions
-    mappings?: MappingTypeMapping
-    settings?: Record
-  }
-}
-
-export interface IndicesRolloverResponse {
-  acknowledged: boolean
-  conditions: Record
-  dry_run: boolean
-  new_index: string
-  old_index: string
-  rolled_over: boolean
-  shards_acknowledged: boolean
-}
-
-export interface IndicesRolloverRolloverConditions {
-  min_age?: Duration
-  max_age?: Duration
-  max_age_millis?: DurationValue
-  min_docs?: long
-  max_docs?: long
-  max_size?: ByteSize
-  max_size_bytes?: long
-  min_size?: ByteSize
-  min_size_bytes?: long
-  max_primary_shard_size?: ByteSize
-  max_primary_shard_size_bytes?: long
-  min_primary_shard_size?: ByteSize
-  min_primary_shard_size_bytes?: long
-  max_primary_shard_docs?: long
-  min_primary_shard_docs?: long
-}
-
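The rollover conditions above map one-to-one onto the request; a short usage sketch (alias name is illustrative, `client` as in the earlier sketch):

```ts
// Roll 'logs-write' over once either condition is met; 'conditions'
// is typed as IndicesRolloverRolloverConditions.
const resp = await client.indices.rollover({
  alias: 'logs-write',
  conditions: { max_age: '7d', max_docs: 1_000_000 }
})
console.log(resp.rolled_over, resp.old_index, resp.new_index)
```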
-export interface IndicesSegmentsIndexSegment {
-  shards: Record
-}
-
-export interface IndicesSegmentsRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  verbose?: boolean
-}
-
-export interface IndicesSegmentsResponse {
-  indices: Record
-  _shards: ShardStatistics
-}
-
-export interface IndicesSegmentsSegment {
-  attributes: Record
-  committed: boolean
-  compound: boolean
-  deleted_docs: long
-  generation: integer
-  search: boolean
-  size_in_bytes: double
-  num_docs: long
-  version: VersionString
-}
-
-export interface IndicesSegmentsShardSegmentRouting {
-  node: string
-  primary: boolean
-  state: string
-}
-
-export interface IndicesSegmentsShardsSegment {
-  num_committed_segments: integer
-  routing: IndicesSegmentsShardSegmentRouting
-  num_search_segments: integer
-  segments: Record
-}
-
-export interface IndicesShardStoresIndicesShardStores {
-  shards: Record
-}
-
-export interface IndicesShardStoresRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[]
-}
-
-export interface IndicesShardStoresResponse {
-  indices: Record
-}
-
-export interface IndicesShardStoresShardStoreKeys {
-  allocation: IndicesShardStoresShardStoreAllocation
-  allocation_id?: Id
-  store_exception?: IndicesShardStoresShardStoreException
-}
-export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys
-& { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException }
-
-export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused'
-
-export interface IndicesShardStoresShardStoreException {
-  reason: string
-  type: string
-}
-
-export interface IndicesShardStoresShardStoreNode {
-  attributes: Record
-  ephemeral_id?: string
-  external_id?: string
-  name: Name
-  roles: string[]
-  transport_address: TransportAddress
-}
-
-export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all'
-
-export interface IndicesShardStoresShardStoreWrapper {
-  stores: IndicesShardStoresShardStore[]
-}
-
-export interface IndicesShrinkRequest extends RequestBase {
-  index: IndexName
-  target: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    settings?: Record
-  }
-}
-
-export interface IndicesShrinkResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-  index: IndexName
-}
-
-export interface IndicesSimulateIndexTemplateRequest extends RequestBase {
-  name: Name
-  create?: boolean
-  cause?: string
-  master_timeout?: Duration
-  include_defaults?: boolean
-}
-
-export interface IndicesSimulateIndexTemplateResponse {
-  overlapping?: IndicesSimulateTemplateOverlapping[]
-  template: IndicesSimulateTemplateTemplate
-}
-
-export interface IndicesSimulateTemplateOverlapping {
-  name: Name
-  index_patterns: string[]
-}
-
-export interface IndicesSimulateTemplateRequest extends RequestBase {
-  name?: Name
-  create?: boolean
-  cause?: string
-  master_timeout?: Duration
-  include_defaults?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_auto_create?: boolean
-    index_patterns?: Indices
-    composed_of?: Name[]
-    template?: IndicesPutIndexTemplateIndexTemplateMapping
-    data_stream?: IndicesDataStreamVisibility
-    priority?: long
-    version?: VersionNumber
-    _meta?: Metadata
-    ignore_missing_component_templates?: string[]
-    deprecated?: boolean
-  }
-}
-
-export interface IndicesSimulateTemplateResponse {
-  overlapping?: IndicesSimulateTemplateOverlapping[]
-  template: IndicesSimulateTemplateTemplate
-}
-
-export interface IndicesSimulateTemplateTemplate {
-  aliases: Record
-  mappings: MappingTypeMapping
-  settings: IndicesIndexSettings
-}
-
-export interface IndicesSplitRequest extends RequestBase {
-  index: IndexName
-  target: IndexName
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: WaitForActiveShards
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aliases?: Record
-    settings?: Record
-  }
-}
-
-export interface IndicesSplitResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-  index: IndexName
-}
-
-export type IndicesStatsIndexMetadataState = 'open' | 'close'
-
-export interface IndicesStatsIndexStats {
-  completion?: CompletionStats
-  docs?: DocStats
-  fielddata?: FielddataStats
-  flush?: FlushStats
-  get?: GetStats
-  indexing?: IndexingStats
-  indices?: IndicesStatsIndicesStats
-  merges?: MergesStats
-  query_cache?: QueryCacheStats
-  recovery?: RecoveryStats
-  refresh?: RefreshStats
-  request_cache?: RequestCacheStats
-  search?: SearchStats
-  segments?: SegmentsStats
-  store?: StoreStats
-  translog?: TranslogStats
-  warmer?: WarmerStats
-  bulk?: BulkStats
-  shard_stats?: IndicesStatsShardsTotalStats
-}
-
-export interface IndicesStatsIndicesStats {
-  primaries?: IndicesStatsIndexStats
-  shards?: Record
-  total?: IndicesStatsIndexStats
-  uuid?: Uuid
-  health?: HealthStatus
-  status?: IndicesStatsIndexMetadataState
-}
-
-export interface IndicesStatsMappingStats {
-  total_count: long
-  total_estimated_overhead?: ByteSize
-  total_estimated_overhead_in_bytes: long
-}
-
-export interface IndicesStatsRequest extends RequestBase {
-  metric?: Metrics
-  index?: Indices
-  completion_fields?: Fields
-  expand_wildcards?: ExpandWildcards
-  fielddata_fields?: Fields
-  fields?: Fields
-  forbid_closed_indices?: boolean
-  groups?: string | string[]
-  include_segment_file_sizes?: boolean
-  include_unloaded_segments?: boolean
-  level?: Level
-}
-
-export interface IndicesStatsResponse {
-  indices?: Record
-  _shards: ShardStatistics
-  _all: IndicesStatsIndicesStats
-}
-
-export interface IndicesStatsShardCommit {
-  generation: integer
-  id: Id
-  num_docs: long
-  user_data: Record
-}
-
-export interface IndicesStatsShardFileSizeInfo {
-  description: string
-  size_in_bytes: long
-  min_size_in_bytes?: long
-  max_size_in_bytes?: long
-  average_size_in_bytes?: long
-  count?: long
-}
-
-export interface IndicesStatsShardLease {
-  id: Id
-  retaining_seq_no: SequenceNumber
-  timestamp: long
-  source: string
-}
-
-export interface IndicesStatsShardPath {
-  data_path: string
-  is_custom_data_path: boolean
-  state_path: string
-}
-
-export interface IndicesStatsShardQueryCache {
-  cache_count: long
-  cache_size: long
-  evictions: long
-  hit_count: long
-  memory_size_in_bytes: long
-  miss_count: long
-  total_count: long
-}
-
-export interface IndicesStatsShardRetentionLeases {
-  primary_term: long
-  version: VersionNumber
-  leases: IndicesStatsShardLease[]
-}
-
-export interface IndicesStatsShardRouting {
-  node: string
-  primary: boolean
-  relocating_node?: string | null
-  state: IndicesStatsShardRoutingState
-}
-
-export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING'
-
-export interface IndicesStatsShardSequenceNumber {
-  global_checkpoint: long
-  local_checkpoint: long
-  max_seq_no: SequenceNumber
-}
-
-export interface IndicesStatsShardStats {
-  commit?: IndicesStatsShardCommit
-  completion?: CompletionStats
-  docs?: DocStats
-  fielddata?: FielddataStats
-  flush?: FlushStats
-  get?: GetStats
-  indexing?: IndexingStats
-  mappings?: IndicesStatsMappingStats
-  merges?: MergesStats
-  shard_path?: IndicesStatsShardPath
-  query_cache?: IndicesStatsShardQueryCache
-  recovery?: RecoveryStats
-  refresh?: RefreshStats
-  request_cache?: RequestCacheStats
-  retention_leases?: IndicesStatsShardRetentionLeases
-  routing?: IndicesStatsShardRouting
-  search?: SearchStats
-  segments?: SegmentsStats
-  seq_no?: IndicesStatsShardSequenceNumber
-  store?: StoreStats
-  translog?: TranslogStats
-  warmer?: WarmerStats
-  bulk?: BulkStats
-  shards?: Record
-  shard_stats?: IndicesStatsShardsTotalStats
-  indices?: IndicesStatsIndicesStats
-}
-
-export interface IndicesStatsShardsTotalStats {
-  total_count: long
-}
-
-export interface IndicesUnfreezeRequest extends RequestBase {
-  index: IndexName
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  wait_for_active_shards?: string
-}
-
-export interface IndicesUnfreezeResponse {
-  acknowledged: boolean
-  shards_acknowledged: boolean
-}
-
-export interface IndicesUpdateAliasesAction {
-  add?: IndicesUpdateAliasesAddAction
-  remove?: IndicesUpdateAliasesRemoveAction
-  remove_index?: IndicesUpdateAliasesRemoveIndexAction
-}
-
-export interface IndicesUpdateAliasesAddAction {
-  alias?: IndexAlias
-  aliases?: IndexAlias | IndexAlias[]
-  filter?: QueryDslQueryContainer
-  index?: IndexName
-  indices?: Indices
-  index_routing?: Routing
-  is_hidden?: boolean
-  is_write_index?: boolean
-  routing?: Routing
-  search_routing?: Routing
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRemoveAction {
-  alias?: IndexAlias
-  aliases?: IndexAlias | IndexAlias[]
-  index?: IndexName
-  indices?: Indices
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRemoveIndexAction {
-  index?: IndexName
-  indices?: Indices
-  must_exist?: boolean
-}
-
-export interface IndicesUpdateAliasesRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    actions?: IndicesUpdateAliasesAction[]
-  }
-}
-
-export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase
-
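A sketch of the alias actions above in use (index and alias names are illustrative; `client` as in the earlier sketch):

```ts
// Each element of 'actions' is an IndicesUpdateAliasesAction holding
// exactly one of add / remove / remove_index; the whole batch is atomic.
await client.indices.updateAliases({
  actions: [
    { remove: { index: 'logs-2024', alias: 'logs-current' } },
    { add: { index: 'logs-2025', alias: 'logs-current', is_write_index: true } }
  ]
})
```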
-export interface IndicesValidateQueryIndicesValidationExplanation {
-  error?: string
-  explanation?: string
-  index: IndexName
-  valid: boolean
-}
-
-export interface IndicesValidateQueryRequest extends RequestBase {
-  index?: Indices
-  allow_no_indices?: boolean
-  all_shards?: boolean
-  analyzer?: string
-  analyze_wildcard?: boolean
-  default_operator?: QueryDslOperator
-  df?: string
-  expand_wildcards?: ExpandWildcards
-  explain?: boolean
-  ignore_unavailable?: boolean
-  lenient?: boolean
-  rewrite?: boolean
-  q?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: QueryDslQueryContainer
-  }
-}
-
-export interface IndicesValidateQueryResponse {
-  explanations?: IndicesValidateQueryIndicesValidationExplanation[]
-  _shards?: ShardStatistics
-  valid: boolean
-  error?: string
-}
-
-export interface InferenceAdaptiveAllocations {
-  enabled?: boolean
-  max_number_of_allocations?: integer
-  min_number_of_allocations?: integer
-}
-
-export interface InferenceAlibabaCloudServiceSettings {
-  api_key: string
-  host: string
-  rate_limit?: InferenceRateLimitSetting
-  service_id: string
-  workspace: string
-}
-
-export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search'
-
-export interface InferenceAlibabaCloudTaskSettings {
-  input_type?: string
-  return_token?: boolean
-}
-
-export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'space_embedding' | 'text_embedding'
-
-export interface InferenceAmazonBedrockServiceSettings {
-  access_key: string
-  model: string
-  provider?: string
-  region: string
-  rate_limit?: InferenceRateLimitSetting
-  secret_key: string
-}
-
-export type InferenceAmazonBedrockServiceType = 'amazonbedrock'
-
-export interface InferenceAmazonBedrockTaskSettings {
-  max_new_tokens?: integer
-  temperature?: float
-  top_k?: float
-  top_p?: float
-}
-
-export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding'
-
-export type InferenceAmazonSageMakerApi = 'openai' | 'elastic'
-
-export interface InferenceAmazonSageMakerServiceSettings {
-  access_key: string
-  endpoint_name: string
-  api: InferenceAmazonSageMakerApi
-  region: string
-  secret_key: string
-  target_model?: string
-  target_container_hostname?: string
-  inference_component_name?: string
-  batch_size?: integer
-  dimensions?: integer
-}
-
-export type InferenceAmazonSageMakerServiceType = 'amazon_sagemaker'
-
-export interface InferenceAmazonSageMakerTaskSettings {
-  custom_attributes?: string
-  enable_explanations?: string
-  inference_id?: string
-  session_id?: string
-  target_variant?: string
-}
-
-export interface InferenceAnthropicServiceSettings {
-  api_key: string
-  model_id: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceAnthropicServiceType = 'anthropic'
-
-export interface InferenceAnthropicTaskSettings {
-  max_tokens: integer
-  temperature?: float
-  top_k?: integer
-  top_p?: float
-}
-
-export type InferenceAnthropicTaskType = 'completion'
-
-export interface InferenceAzureAiStudioServiceSettings {
-  api_key: string
-  endpoint_type: string
-  target: string
-  provider: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceAzureAiStudioServiceType = 'azureaistudio'
-
-export interface InferenceAzureAiStudioTaskSettings {
-  do_sample?: float
-  max_new_tokens?: integer
-  temperature?: float
-  top_p?: float
-  user?: string
-}
-
-export type InferenceAzureAiStudioTaskType = 'completion' | 'text_embedding'
-
-export interface InferenceAzureOpenAIServiceSettings {
-  api_key?: string
-  api_version: string
-  deployment_id: string
-  entra_id?: string
-  rate_limit?: InferenceRateLimitSetting
-  resource_name: string
-}
-
-export type InferenceAzureOpenAIServiceType = 'azureopenai'
-
-export interface InferenceAzureOpenAITaskSettings {
-  user?: string
-}
-
-export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding'
-
-export type InferenceCohereEmbeddingType = 'binary' | 'bit' | 'byte' | 'float' | 'int8'
-
-export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search'
-
-export interface InferenceCohereServiceSettings {
-  api_key: string
-  embedding_type?: InferenceCohereEmbeddingType
-  model_id?: string
-  rate_limit?: InferenceRateLimitSetting
-  similarity?: InferenceCohereSimilarityType
-}
-
-export type InferenceCohereServiceType = 'cohere'
-
-export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm'
-
-export interface InferenceCohereTaskSettings {
-  input_type?: InferenceCohereInputType
-  return_documents?: boolean
-  top_n?: integer
-  truncate?: InferenceCohereTruncateType
-}
-
-export type InferenceCohereTaskType = 'completion' | 'rerank' | 'text_embedding'
-
-export type InferenceCohereTruncateType = 'END' | 'NONE' | 'START'
-
-export interface InferenceCompletionInferenceResult {
-  completion: InferenceCompletionResult[]
-}
-
-export interface InferenceCompletionResult {
-  result: string
-}
-
-export interface InferenceCompletionTool {
-  type: string
-  function: InferenceCompletionToolFunction
-}
-
-export interface InferenceCompletionToolChoice {
-  type: string
-  function: InferenceCompletionToolChoiceFunction
-}
-
-export interface InferenceCompletionToolChoiceFunction {
-  name: string
-}
-
-export interface InferenceCompletionToolFunction {
-  description?: string
-  name: string
-  parameters?: any
-  strict?: boolean
-}
-
-export type InferenceCompletionToolType = string | InferenceCompletionToolChoice
-
-export interface InferenceContentObject {
-  text: string
-  type: string
-}
-
-export interface InferenceCustomRequestParams {
-  content: string
-}
-
-export interface InferenceCustomResponseParams {
-  json_parser: any
-}
-
-export interface InferenceCustomServiceSettings {
-  headers?: any
-  input_type?: any
-  query_parameters?: any
-  request: InferenceCustomRequestParams
-  response: InferenceCustomResponseParams
-  secret_parameters: any
-  url?: string
-}
-
-export type InferenceCustomServiceType = 'custom'
-
-export interface InferenceCustomTaskSettings {
-  parameters?: any
-}
-
-export type InferenceCustomTaskType = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion'
-
-export interface InferenceDeepSeekServiceSettings {
-  api_key: string
-  model_id: string
-  url?: string
-}
-
-export type InferenceDeepSeekServiceType = 'deepseek'
-
-export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase {
-  pipelines: string[]
-}
-
-export type InferenceDenseByteVector = byte[]
-
-export type InferenceDenseVector = float[]
-
-export interface InferenceElasticsearchServiceSettings {
-  adaptive_allocations?: InferenceAdaptiveAllocations
-  deployment_id?: string
-  model_id: string
-  num_allocations?: integer
-  num_threads: integer
-}
-
-export type InferenceElasticsearchServiceType = 'elasticsearch'
-
-export interface InferenceElasticsearchTaskSettings {
-  return_documents?: boolean
-}
-
-export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding'
-
-export interface InferenceElserServiceSettings {
-  adaptive_allocations?: InferenceAdaptiveAllocations
-  num_allocations: integer
-  num_threads: integer
-}
-
-export type InferenceElserServiceType = 'elser'
-
-export type InferenceElserTaskType = 'sparse_embedding'
-
-export type InferenceGoogleAiServiceType = 'googleaistudio'
-
-export interface InferenceGoogleAiStudioServiceSettings {
-  api_key: string
-  model_id: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding'
-
-export interface InferenceGoogleVertexAIServiceSettings {
-  location: string
-  model_id: string
-  project_id: string
-  rate_limit?: InferenceRateLimitSetting
-  service_account_json: string
-}
-
-export type InferenceGoogleVertexAIServiceType = 'googlevertexai'
-
-export interface InferenceGoogleVertexAITaskSettings {
-  auto_truncate?: boolean
-  top_n?: integer
-}
-
-export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion'
-
-export interface InferenceHuggingFaceServiceSettings {
-  api_key: string
-  rate_limit?: InferenceRateLimitSetting
-  url: string
-  model_id?: string
-}
-
-export type InferenceHuggingFaceServiceType = 'hugging_face'
-
-export interface InferenceHuggingFaceTaskSettings {
-  return_documents?: boolean
-  top_n?: integer
-}
-
-export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding'
-
-export interface InferenceInferenceChunkingSettings {
-  max_chunk_size?: integer
-  overlap?: integer
-  sentence_overlap?: integer
-  strategy?: string
-}
-
-export interface InferenceInferenceEndpoint {
-  chunking_settings?: InferenceInferenceChunkingSettings
-  service: string
-  service_settings: InferenceServiceSettings
-  task_settings?: InferenceTaskSettings
-}
-
-export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskType
-}
-
-export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAlibabaCloudAI
-}
-
-export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAmazonBedrock
-}
-
-export interface InferenceInferenceEndpointInfoAmazonSageMaker extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAmazonSageMaker
-}
-
-export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAnthropic
-}
-
-export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAzureAIStudio
-}
-
-export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeAzureOpenAI
-}
-
-export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeCohere
-}
-
-export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeCustom
-}
-
-export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeDeepSeek
-}
-
-export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeELSER
-}
-
-export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeElasticsearch
-}
-
-export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeGoogleAIStudio
-}
-
-export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeGoogleVertexAI
-}
-
-export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeHuggingFace
-}
-
-export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeJinaAi
-}
-
-export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeMistral
-}
-
-export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeOpenAI
-}
-
-export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeVoyageAI
-}
-
-export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint {
-  inference_id: string
-  task_type: InferenceTaskTypeWatsonx
-}
-
-export interface InferenceInferenceResult {
-  text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
-  text_embedding_bits?: InferenceTextEmbeddingByteResult[]
-  text_embedding?: InferenceTextEmbeddingResult[]
-  sparse_embedding?: InferenceSparseEmbeddingResult[]
-  completion?: InferenceCompletionResult[]
-  rerank?: InferenceRankedDocument[]
-}
-
-export interface InferenceJinaAIServiceSettings {
-  api_key: string
-  model_id?: string
-  rate_limit?: InferenceRateLimitSetting
-  similarity?: InferenceJinaAISimilarityType
-}
-
-export type InferenceJinaAIServiceType = 'jinaai'
-
-export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm'
-
-export interface InferenceJinaAITaskSettings {
-  return_documents?: boolean
-  task?: InferenceJinaAITextEmbeddingTask
-  top_n?: integer
-}
-
-export type InferenceJinaAITaskType = 'rerank' | 'text_embedding'
-
-export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search'
-
-export interface InferenceMessage {
-  content?: InferenceMessageContent
-  role: string
-  tool_call_id?: Id
-  tool_calls?: InferenceToolCall[]
-}
-
-export type InferenceMessageContent = string | InferenceContentObject[]
-
-export interface InferenceMistralServiceSettings {
-  api_key: string
-  max_input_tokens?: integer
-  model: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceMistralServiceType = 'mistral'
-
-export type InferenceMistralTaskType = 'text_embedding' | 'completion' | 'chat_completion'
-
-export interface InferenceOpenAIServiceSettings {
-  api_key: string
-  dimensions?: integer
-  model_id: string
-  organization_id?: string
-  rate_limit?: InferenceRateLimitSetting
-  url?: string
-}
-
-export type InferenceOpenAIServiceType = 'openai'
-
-export interface InferenceOpenAITaskSettings {
-  user?: string
-}
-
-export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding'
-
-export interface InferenceRankedDocument {
-  index: integer
-  relevance_score: float
-  text?: string
-}
-
-export interface InferenceRateLimitSetting {
-  requests_per_minute?: integer
-}
-
-export interface InferenceRequestChatCompletion {
-  messages: InferenceMessage[]
-  model?: string
-  max_completion_tokens?: long
-  stop?: string[]
-  temperature?: float
-  tool_choice?: InferenceCompletionToolType
-  tools?: InferenceCompletionTool[]
-  top_p?: float
-}
-
-export interface InferenceRerankedInferenceResult {
-  rerank: InferenceRankedDocument[]
-}
-
-export type InferenceServiceSettings = any
-
-export interface InferenceSparseEmbeddingInferenceResult {
-  sparse_embedding: InferenceSparseEmbeddingResult[]
-}
-
-export interface InferenceSparseEmbeddingResult {
-  embedding: InferenceSparseVector
-}
-
-export type InferenceSparseVector = Record
-
-export type InferenceTaskSettings = any
-
-export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion'
-
-export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding'
-
-export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion'
-
-export type InferenceTaskTypeAmazonSageMaker = 'text_embedding' | 'completion' | 'chat_completion' | 'sparse_embedding' | 'rerank'
-
-export type InferenceTaskTypeAnthropic = 'completion'
-
-export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion'
-
-export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion'
-
-export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion'
-
-export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion'
-
-export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion'
-
-export type InferenceTaskTypeELSER = 'sparse_embedding'
-
-export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank'
-
-export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion'
-
-export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank'
-
-export type InferenceTaskTypeHuggingFace = 'text_embedding'
-
-export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank'
-
-export type InferenceTaskTypeMistral = 'text_embedding'
-
-export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion'
-
-export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank'
-
-export type InferenceTaskTypeWatsonx = 'text_embedding'
-
-export interface InferenceTextEmbeddingByteResult {
-  embedding: InferenceDenseByteVector
-}
-
-export interface InferenceTextEmbeddingInferenceResult {
-  text_embedding_bytes?: InferenceTextEmbeddingByteResult[]
-  text_embedding_bits?: InferenceTextEmbeddingByteResult[]
-  text_embedding?: InferenceTextEmbeddingResult[]
-}
-
-export interface InferenceTextEmbeddingResult {
-  embedding: InferenceDenseVector
-}
-
-export interface InferenceToolCall {
-  id: Id
-  function: InferenceToolCallFunction
-  type: string
-}
-
-export interface InferenceToolCallFunction {
-  arguments: string
-  name: string
-}
-
-export interface InferenceVoyageAIServiceSettings {
-  dimensions?: integer
-  model_id: string
-  rate_limit?: InferenceRateLimitSetting
-  embedding_type?: float
-}
-
-export type InferenceVoyageAIServiceType = 'voyageai'
-
-export interface InferenceVoyageAITaskSettings {
-  input_type?: string
-  return_documents?: boolean
-  top_k?: integer
-  truncation?: boolean
-}
-
-export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank'
-
-export interface InferenceWatsonxServiceSettings {
-  api_key: string
-  api_version: string
-  model_id: string
-  project_id: string
-  rate_limit?: InferenceRateLimitSetting
-  url: string
-}
-
-export type InferenceWatsonxServiceType = 'watsonxai'
-
-export type InferenceWatsonxTaskType = 'text_embedding'
-
-export interface InferenceChatCompletionUnifiedRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'chat_completion_request' instead. */
-  body?: InferenceRequestChatCompletion
-}
-
-export type InferenceChatCompletionUnifiedResponse = StreamResult
-
-export interface InferenceCompletionRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceCompletionResponse = InferenceCompletionInferenceResult
-
-export interface InferenceDeleteRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  dry_run?: boolean
-  force?: boolean
-}
-
-export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult
-
-export interface InferenceGetRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id?: Id
-}
-
-export interface InferenceGetResponse {
-  endpoints: InferenceInferenceEndpointInfo[]
-}
-
-export interface InferenceInferenceRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: string
-    input: string | string[]
-    input_type?: string
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceInferenceResponse = InferenceInferenceResult
-
-export interface InferencePutRequest extends RequestBase {
-  task_type?: InferenceTaskType
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'inference_config' instead. */
-  body?: InferenceInferenceEndpoint
-}
-
-export type InferencePutResponse = InferenceInferenceEndpointInfo
-
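Per the deprecation note on `InferencePutRequest`, the endpoint definition moves to an `inference_config` parameter. A sketch (endpoint id is illustrative, and `.multilingual-e5-small` assumes that built-in model is available on the cluster):

```ts
// 'inference_config' is an InferenceInferenceEndpoint; here the
// 'elasticsearch' service with its required service_settings.
await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-embeddings',
  inference_config: {
    service: 'elasticsearch',
    service_settings: { model_id: '.multilingual-e5-small', num_threads: 1 }
  }
})
```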
-export interface InferencePutAlibabacloudRequest extends RequestBase {
-  task_type: InferenceAlibabaCloudTaskType
-  alibabacloud_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAlibabaCloudServiceType
-    service_settings: InferenceAlibabaCloudServiceSettings
-    task_settings?: InferenceAlibabaCloudTaskSettings
-  }
-}
-
-export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI
-
-export interface InferencePutAmazonbedrockRequest extends RequestBase {
-  task_type: InferenceAmazonBedrockTaskType
-  amazonbedrock_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAmazonBedrockServiceType
-    service_settings: InferenceAmazonBedrockServiceSettings
-    task_settings?: InferenceAmazonBedrockTaskSettings
-  }
-}
-
-export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock
-
-export interface InferencePutAmazonsagemakerRequest extends RequestBase {
-  task_type: InferenceTaskTypeAmazonSageMaker
-  amazonsagemaker_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAmazonSageMakerServiceType
-    service_settings: InferenceAmazonSageMakerServiceSettings
-    task_settings?: InferenceAmazonSageMakerTaskSettings
-  }
-}
-
-export type InferencePutAmazonsagemakerResponse = InferenceInferenceEndpointInfoAmazonSageMaker
-
-export interface InferencePutAnthropicRequest extends RequestBase {
-  task_type: InferenceAnthropicTaskType
-  anthropic_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAnthropicServiceType
-    service_settings: InferenceAnthropicServiceSettings
-    task_settings?: InferenceAnthropicTaskSettings
-  }
-}
-
-export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic
-
-export interface InferencePutAzureaistudioRequest extends RequestBase {
-  task_type: InferenceAzureAiStudioTaskType
-  azureaistudio_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAzureAiStudioServiceType
-    service_settings: InferenceAzureAiStudioServiceSettings
-    task_settings?: InferenceAzureAiStudioTaskSettings
-  }
-}
-
-export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio
-
-export interface InferencePutAzureopenaiRequest extends RequestBase {
-  task_type: InferenceAzureOpenAITaskType
-  azureopenai_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceAzureOpenAIServiceType
-    service_settings: InferenceAzureOpenAIServiceSettings
-    task_settings?: InferenceAzureOpenAITaskSettings
-  }
-}
-
-export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI
-
-export interface InferencePutCohereRequest extends RequestBase {
-  task_type: InferenceCohereTaskType
-  cohere_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceCohereServiceType
-    service_settings: InferenceCohereServiceSettings
-    task_settings?: InferenceCohereTaskSettings
-  }
-}
-
-export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
-
-export interface InferencePutCustomRequest extends RequestBase {
-  task_type: InferenceCustomTaskType
-  custom_inference_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceCustomServiceType
-    service_settings: InferenceCustomServiceSettings
-    task_settings?: InferenceCustomTaskSettings
-  }
-}
-
-export type InferencePutCustomResponse = InferenceInferenceEndpointInfoCustom
-
-export interface InferencePutDeepseekRequest extends RequestBase {
-  task_type: InferenceTaskTypeDeepSeek
-  deepseek_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceDeepSeekServiceType
-    service_settings: InferenceDeepSeekServiceSettings
-  }
-}
-
-export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek
-
-export interface InferencePutElasticsearchRequest extends RequestBase {
-  task_type: InferenceElasticsearchTaskType
-  elasticsearch_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceElasticsearchServiceType
-    service_settings: InferenceElasticsearchServiceSettings
-    task_settings?: InferenceElasticsearchTaskSettings
-  }
-}
-
-export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
-
-export interface InferencePutElserRequest extends RequestBase {
-  task_type: InferenceElserTaskType
-  elser_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceElserServiceType
-    service_settings: InferenceElserServiceSettings
-  }
-}
-
-export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
-
-export interface InferencePutGoogleaistudioRequest extends RequestBase {
-  task_type: InferenceGoogleAiStudioTaskType
-  googleaistudio_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceGoogleAiServiceType
-    service_settings: InferenceGoogleAiStudioServiceSettings
-  }
-}
-
-export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
-
-export interface InferencePutGooglevertexaiRequest extends RequestBase {
-  task_type: InferenceGoogleVertexAITaskType
-  googlevertexai_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceGoogleVertexAIServiceType
-    service_settings: InferenceGoogleVertexAIServiceSettings
-    task_settings?: InferenceGoogleVertexAITaskSettings
-  }
-}
-
-export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI
-
-export interface InferencePutHuggingFaceRequest extends RequestBase {
-  task_type: InferenceHuggingFaceTaskType
-  huggingface_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceHuggingFaceServiceType
-    service_settings: InferenceHuggingFaceServiceSettings
-    task_settings?: InferenceHuggingFaceTaskSettings
-  }
-}
-
-export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
-
-export interface InferencePutJinaaiRequest extends RequestBase {
-  task_type: InferenceJinaAITaskType
-  jinaai_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceJinaAIServiceType
-    service_settings: InferenceJinaAIServiceSettings
-    task_settings?: InferenceJinaAITaskSettings
-  }
-}
-
-export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi
-
-export interface InferencePutMistralRequest extends RequestBase {
-  task_type: InferenceMistralTaskType
-  mistral_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceMistralServiceType
-    service_settings: InferenceMistralServiceSettings
-  }
-}
-
-export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
-
-export interface InferencePutOpenaiRequest extends RequestBase {
-  task_type: InferenceOpenAITaskType
-  openai_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceOpenAIServiceType
-    service_settings: InferenceOpenAIServiceSettings
-    task_settings?: InferenceOpenAITaskSettings
-  }
-}
-
-export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
-
-export interface InferencePutVoyageaiRequest extends RequestBase {
-  task_type: InferenceVoyageAITaskType
-  voyageai_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    chunking_settings?: InferenceInferenceChunkingSettings
-    service: InferenceVoyageAIServiceType
-    service_settings: InferenceVoyageAIServiceSettings
-    task_settings?: InferenceVoyageAITaskSettings
-  }
-}
-
-export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI
-
-export interface InferencePutWatsonxRequest extends RequestBase {
-  task_type: InferenceWatsonxTaskType
-  watsonx_inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    service: InferenceWatsonxServiceType
-    service_settings: InferenceWatsonxServiceSettings
-  }
-}
-
-export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
-
-export interface InferenceRerankRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query: string
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceRerankResponse = InferenceRerankedInferenceResult
-
-export interface InferenceSparseEmbeddingRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult
-
-export interface InferenceStreamCompletionRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceStreamCompletionResponse = StreamResult
-
-export interface InferenceTextEmbeddingRequest extends RequestBase {
-  inference_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    input: string | string[]
-    task_settings?: InferenceTaskSettings
-  }
-}
-
-export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult
-
-export interface InferenceUpdateRequest extends RequestBase {
-  inference_id: Id
-  task_type?: InferenceTaskType
-  /** @deprecated The use of the 'body' key has been deprecated, use 'inference_config' instead. */
-  body?: InferenceInferenceEndpoint
-}
-
-export type InferenceUpdateResponse = InferenceInferenceEndpointInfo
-
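Assuming this client version exposes `inference.textEmbedding` (the request/response pair above suggests it does), a usage sketch against the endpoint created earlier:

```ts
// Response is InferenceTextEmbeddingInferenceResult; dense vectors land
// under 'text_embedding'.
const emb = await client.inference.textEmbedding({
  inference_id: 'my-embeddings',
  input: ['first passage', 'second passage']
})
console.log(emb.text_embedding?.[0].embedding.length)
```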
-export interface IngestAppendProcessor extends IngestProcessorBase {
-  field: Field
-  value: any | any[]
-  allow_duplicates?: boolean
-}
-
-export interface IngestAttachmentProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  indexed_chars?: long
-  indexed_chars_field?: Field
-  properties?: string[]
-  target_field?: Field
-  remove_binary?: boolean
-  resource_name?: string
-}
-
-export interface IngestBytesProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestCircleProcessor extends IngestProcessorBase {
-  error_distance: double
-  field: Field
-  ignore_missing?: boolean
-  shape_type: IngestShapeType
-  target_field?: Field
-}
-
-export interface IngestCommunityIDProcessor extends IngestProcessorBase {
-  source_ip?: Field
-  source_port?: Field
-  destination_ip?: Field
-  destination_port?: Field
-  iana_number?: Field
-  icmp_type?: Field
-  icmp_code?: Field
-  transport?: Field
-  target_field?: Field
-  seed?: integer
-  ignore_missing?: boolean
-}
-
-export interface IngestConvertProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-  type: IngestConvertType
-}
-
-export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto'
-
-export interface IngestCsvProcessor extends IngestProcessorBase {
-  empty_value?: any
-  field: Field
-  ignore_missing?: boolean
-  quote?: string
-  separator?: string
-  target_fields: Fields
-  trim?: boolean
-}
-
-export interface IngestDatabaseConfiguration {
-  name: Name
-  maxmind?: IngestMaxmind
-  ipinfo?: IngestIpinfo
-}
-
-export interface IngestDatabaseConfigurationFull {
-  web?: IngestWeb
-  local?: IngestLocal
-  name: Name
-  maxmind?: IngestMaxmind
-  ipinfo?: IngestIpinfo
-}
-
-export interface IngestDateIndexNameProcessor extends IngestProcessorBase {
-  date_formats: string[]
-  date_rounding: string
-  field: Field
-  index_name_format?: string
-  index_name_prefix?: string
-  locale?: string
-  timezone?: string
-}
-
-export interface IngestDateProcessor extends IngestProcessorBase {
-  field: Field
-  formats: string[]
-  locale?: string
-  target_field?: Field
-  timezone?: string
-  output_format?: string
-}
-
-export interface IngestDissectProcessor extends IngestProcessorBase {
-  append_separator?: string
-  field: Field
-  ignore_missing?: boolean
-  pattern: string
-}
-
-export interface IngestDocument {
-  _id?: Id
-  _index?: IndexName
-  _source: any
-}
-
-export interface IngestDocumentSimulationKeys {
-  _id: Id
-  _index: IndexName
-  _ingest: IngestIngest
-  _routing?: string
-  _source: Record
-  _version?: SpecUtilsStringified
-  _version_type?: VersionType
-}
-export type IngestDocumentSimulation = IngestDocumentSimulationKeys
-& { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType }
-
-export interface IngestDotExpanderProcessor extends IngestProcessorBase {
-  field: Field
-  override?: boolean
-  path?: string
-}
-
-export interface IngestDropProcessor extends IngestProcessorBase {
-}
-
-export interface IngestEnrichProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  max_matches?: integer
-  override?: boolean
-  policy_name: string
-  shape_relation?: GeoShapeRelation
-  target_field: Field
-}
-
-export interface IngestFailProcessor extends IngestProcessorBase {
-  message: string
-}
-
-export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3'
-
-export interface IngestFingerprintProcessor extends IngestProcessorBase {
-  fields: Fields
-  target_field?: Field
-  salt?: string
-  method?: IngestFingerprintDigest
-  ignore_missing?: boolean
-}
-
-export interface IngestForeachProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  processor: IngestProcessorContainer
-}
-
-export interface IngestGeoGridProcessor extends IngestProcessorBase {
-  field: string
-  tile_type: IngestGeoGridTileType
-  target_field?: Field
-  parent_field?: Field
-  children_field?: Field
-  non_children_field?: Field
-  precision_field?: Field
-  ignore_missing?: boolean
-  target_format?: IngestGeoGridTargetFormat
-}
-
-export type IngestGeoGridTargetFormat = 'geojson' | 'wkt'
-
-export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash'
-
-export interface IngestGeoIpProcessor extends IngestProcessorBase {
-  database_file?: string
-  field: Field
-  first_only?: boolean
-  ignore_missing?: boolean
-  properties?: string[]
-  target_field?: Field
-  download_database_on_pipeline_creation?: boolean
-}
-
-export interface IngestGrokProcessor extends IngestProcessorBase {
-  ecs_compatibility?: string
-  field: Field
-  ignore_missing?: boolean
-  pattern_definitions?: Record
-  patterns: GrokPattern[]
-  trace_match?: boolean
-}
-
-export interface IngestGsubProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  pattern: string
-  replacement: string
-  target_field?: Field
-}
-
-export interface IngestHtmlStripProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestInferenceConfig {
-  regression?: IngestInferenceConfigRegression
-  classification?: IngestInferenceConfigClassification
-}
-
-export interface IngestInferenceConfigClassification {
-  num_top_classes?: integer
-  num_top_feature_importance_values?: integer
-  results_field?: Field
-  top_classes_results_field?: Field
-  prediction_field_type?: string
-}
-
-export interface IngestInferenceConfigRegression {
-  results_field?: Field
-  num_top_feature_importance_values?: integer
-}
-
-export interface IngestInferenceProcessor extends IngestProcessorBase {
-  model_id: Id
-  target_field?: Field
-  field_map?: Record
-  inference_config?: IngestInferenceConfig
-  input_output?: IngestInputConfig | IngestInputConfig[]
-  ignore_missing?: boolean
-}
-
-export interface IngestIngest {
-  _redact?: IngestRedact
-  timestamp: DateTime
-  pipeline?: Name
-}
-
-export interface IngestInputConfig {
-  input_field: string
-  output_field: string
-}
-
-export interface IngestIpLocationProcessor extends IngestProcessorBase {
-  database_file?: string
-  field: Field
-  first_only?: boolean
-  ignore_missing?: boolean
-  properties?: string[]
-  target_field?: Field
-  download_database_on_pipeline_creation?: boolean
-}
-
-export interface IngestIpinfo {
-}
-
-export interface IngestJoinProcessor extends IngestProcessorBase {
-  field: Field
-  separator: string
-  target_field?: Field
-}
-
-export interface IngestJsonProcessor extends IngestProcessorBase {
-  add_to_root?: boolean
-  add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy
-  allow_duplicate_keys?: boolean
-  field: Field
-  target_field?: Field
-}
-
-export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge'
-
-export interface IngestKeyValueProcessor extends IngestProcessorBase {
-  exclude_keys?: string[]
-  field: Field
-  field_split: string
-  ignore_missing?: boolean
-  include_keys?: string[]
-  prefix?: string
-  strip_brackets?: boolean
-  target_field?: Field
-  trim_key?: string
-  trim_value?: string
-  value_split: string
-}
-
-export interface IngestLocal {
-  type: string
-}
-
-export interface IngestLowercaseProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestMaxmind {
-  account_id: Id
-}
-
-export interface IngestNetworkDirectionProcessor extends IngestProcessorBase {
-  source_ip?: Field
-  destination_ip?: Field
-  target_field?: Field
-  internal_networks?: string[]
-  internal_networks_field?: Field
-  ignore_missing?: boolean
-}
-
-export interface IngestPipeline {
-  description?: string
-  on_failure?: IngestProcessorContainer[]
-  processors?: IngestProcessorContainer[]
-  version?: VersionNumber
-  deprecated?: boolean
-  _meta?: Metadata
-}
-
-export interface IngestPipelineConfig {
-  description?: string
-  version?: VersionNumber
-  processors: IngestProcessorContainer[]
-}
-
-export interface IngestPipelineProcessor extends IngestProcessorBase {
-  name: Name
-  ignore_missing_pipeline?: boolean
-}
-
-export interface IngestPipelineProcessorResult {
-  doc?: IngestDocumentSimulation
-  tag?: string
-  processor_type?: string
-  status?: IngestPipelineSimulationStatusOptions
-  description?: string
-  ignored_error?: ErrorCause
-  error?: ErrorCause
-}
-
-export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped'
-
-export interface IngestProcessorBase {
-  description?: string
-  if?: string
-  ignore_failure?: boolean
-  on_failure?: IngestProcessorContainer[]
-  tag?: string
-}
-
-export interface IngestProcessorContainer {
-  append?: IngestAppendProcessor
-  attachment?: IngestAttachmentProcessor
-  bytes?: IngestBytesProcessor
-  circle?: IngestCircleProcessor
-  community_id?: IngestCommunityIDProcessor
-  convert?: IngestConvertProcessor
-  csv?: IngestCsvProcessor
-  date?: IngestDateProcessor
-  date_index_name?: IngestDateIndexNameProcessor
-  dissect?: IngestDissectProcessor
-  dot_expander?: IngestDotExpanderProcessor
-  drop?: IngestDropProcessor
-  enrich?: IngestEnrichProcessor
-  fail?: IngestFailProcessor
-  fingerprint?: IngestFingerprintProcessor
-  foreach?: IngestForeachProcessor
-  ip_location?: IngestIpLocationProcessor
-  geo_grid?: IngestGeoGridProcessor
-  geoip?: IngestGeoIpProcessor
-  grok?: IngestGrokProcessor
-  gsub?: IngestGsubProcessor
-  html_strip?: IngestHtmlStripProcessor
-  inference?: IngestInferenceProcessor
-  join?: IngestJoinProcessor
-  json?: IngestJsonProcessor
-  kv?: IngestKeyValueProcessor
-  lowercase?: IngestLowercaseProcessor
-  network_direction?: IngestNetworkDirectionProcessor
-  pipeline?: IngestPipelineProcessor
-  redact?: IngestRedactProcessor
-  registered_domain?: IngestRegisteredDomainProcessor
-  remove?: IngestRemoveProcessor
-  rename?: IngestRenameProcessor
-  reroute?: IngestRerouteProcessor
-  script?: IngestScriptProcessor
-  set?: IngestSetProcessor
-  set_security_user?: IngestSetSecurityUserProcessor
-  sort?: IngestSortProcessor
-  split?: IngestSplitProcessor
-  terminate?: IngestTerminateProcessor
-  trim?: IngestTrimProcessor
-  uppercase?: IngestUppercaseProcessor
-  urldecode?: IngestUrlDecodeProcessor
-  uri_parts?: IngestUriPartsProcessor
-  user_agent?: IngestUserAgentProcessor
-}
-
-export interface IngestRedact {
-  _is_redacted: boolean
-}
-
-export interface IngestRedactProcessor extends IngestProcessorBase {
-  field: Field
-  patterns: GrokPattern[]
-  pattern_definitions?: Record
-  prefix?: string
-  suffix?: string
-  ignore_missing?: boolean
-  skip_if_unlicensed?: boolean
-  trace_redact?: boolean
-}
-
-export interface IngestRegisteredDomainProcessor extends IngestProcessorBase {
-  field: Field
-  target_field?: Field
-  ignore_missing?: boolean
-}
-
-export interface IngestRemoveProcessor extends IngestProcessorBase {
-  field: Fields
-  keep?: Fields
-  ignore_missing?: boolean
-}
-
-export interface IngestRenameProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field: Field
-}
-
-export interface IngestRerouteProcessor extends IngestProcessorBase {
-  destination?: string
-  dataset?: string | string[]
-  namespace?: string | string[]
-}
-
-export interface IngestScriptProcessor extends IngestProcessorBase {
-  id?: Id
-  lang?: string
-  params?: Record
-  source?: string
-}
-
-export interface IngestSetProcessor extends IngestProcessorBase {
-  copy_from?: Field
-  field: Field
-  ignore_empty_value?: boolean
-  media_type?: string
-  override?: boolean
-  value?: any
-}
-
-export interface IngestSetSecurityUserProcessor extends IngestProcessorBase {
-  field: Field
-  properties?: string[]
-}
-
-export type IngestShapeType = 'geo_shape' | 'shape'
-
-export interface IngestSimulateDocumentResult {
-  doc?: IngestDocumentSimulation
-  error?: ErrorCause
-  processor_results?: IngestPipelineProcessorResult[]
-}
-
-export interface IngestSortProcessor extends IngestProcessorBase {
-  field: Field
-  order?: SortOrder
-  target_field?: Field
-}
-
-export interface IngestSplitProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  preserve_trailing?: boolean
-  separator: string
-  target_field?: Field
-}
-
-export interface IngestTerminateProcessor extends IngestProcessorBase {
-}
-
-export interface IngestTrimProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUppercaseProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUriPartsProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  keep_original?: boolean
-  remove_if_successful?: boolean
-  target_field?: Field
-}
-
-export interface IngestUrlDecodeProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  target_field?: Field
-}
-
-export interface IngestUserAgentProcessor extends IngestProcessorBase {
-  field: Field
-  ignore_missing?: boolean
-  regex_file?: string
-  target_field?: Field
-  properties?: IngestUserAgentProperty[]
-  extract_device_type?: boolean
-}
-
-export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version'
-
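A sketch of how `IngestProcessorContainer` is consumed: each array element names exactly one processor by its key (pipeline id and field names are illustrative; `client` as in the earlier sketch):

```ts
// Two-processor pipeline; 'processors' is IngestProcessorContainer[].
await client.ingest.putPipeline({
  id: 'my-pipeline',
  processors: [
    { set: { field: 'env', value: 'prod' } },
    { lowercase: { field: 'user.name', ignore_missing: true } }
  ]
})
```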
-export interface IngestWeb {
-}
-
-export interface IngestDeleteGeoipDatabaseRequest extends RequestBase {
-  id: Ids
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase
-
-export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase {
-  id: Ids
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase
-
-export interface IngestDeletePipelineRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type IngestDeletePipelineResponse = AcknowledgedResponseBase
-
-export interface IngestGeoIpStatsGeoIpDownloadStatistics {
-  successful_downloads: integer
-  failed_downloads: integer
-  total_download_time: DurationValue
-  databases_count: integer
-  skipped_updates: integer
-  expired_databases: integer
-}
-
-export interface IngestGeoIpStatsGeoIpNodeDatabaseName {
-  name: Name
-}
-
-export interface IngestGeoIpStatsGeoIpNodeDatabases {
-  databases: IngestGeoIpStatsGeoIpNodeDatabaseName[]
-  files_in_temp: string[]
-}
-
-export interface IngestGeoIpStatsRequest extends RequestBase {
-}
-
-export interface IngestGeoIpStatsResponse {
-  stats: IngestGeoIpStatsGeoIpDownloadStatistics
-  nodes: Record
-}
-
-export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata {
-  id: Id
-  version: long
-  modified_date_millis: EpochTime
-  database: IngestDatabaseConfiguration
-}
-
-export interface IngestGetGeoipDatabaseRequest extends RequestBase {
-  id?: Ids
-}
-
-export interface IngestGetGeoipDatabaseResponse {
-  databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[]
-}
-
-export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata {
-  id: Id
-  version: VersionNumber
-  modified_date_millis?: EpochTime
-  modified_date?: EpochTime
-  database: IngestDatabaseConfigurationFull
-}
-
-export interface IngestGetIpLocationDatabaseRequest extends RequestBase {
-  id?: Ids
-}
-
-export interface IngestGetIpLocationDatabaseResponse {
-  databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[]
-}
-
-export interface IngestGetPipelineRequest extends RequestBase {
-  id?: Id
-  master_timeout?: Duration
-  summary?: boolean
-}
-
-export type IngestGetPipelineResponse = Record
-
-export interface IngestProcessorGrokRequest extends RequestBase {
-}
-
-export interface IngestProcessorGrokResponse {
-  patterns: Record
-}
-
-export interface IngestPutGeoipDatabaseRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name: Name
-    maxmind: IngestMaxmind
-  }
-}
-
-export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase
-
-export interface IngestPutIpLocationDatabaseRequest extends RequestBase {
-  id: Id
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'configuration' instead. */
-  body?: IngestDatabaseConfiguration
-}
-
-export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase
-
-  body?: {
-    _meta?: Metadata
-    description?: string
-    on_failure?: IngestProcessorContainer[]
-    processors?: IngestProcessorContainer[]
-    version?: VersionNumber
-    deprecated?: boolean
-  }
-}
-
-export type IngestPutPipelineResponse = AcknowledgedResponseBase
-
-export interface IngestSimulateRequest extends RequestBase {
-  id?: Id
-  verbose?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs: IngestDocument[]
-    pipeline?: IngestPipeline
-  }
-}
-
-export interface IngestSimulateResponse {
-  docs: IngestSimulateDocumentResult[]
-}
-
-export interface LicenseLicense {
-  expiry_date_in_millis: EpochTime
-  issue_date_in_millis: EpochTime
-  start_date_in_millis?: EpochTime
-  issued_to: string
-  issuer: string
-  max_nodes?: long | null
-  max_resource_units?: long
-  signature: string
-  type: LicenseLicenseType
-  uid: string
-}
-
-export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired'
-
-export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise'
-
-export interface LicenseDeleteRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type LicenseDeleteResponse = AcknowledgedResponseBase
-
-export interface LicenseGetLicenseInformation {
-  expiry_date?: DateTime
-  expiry_date_in_millis?: EpochTime
-  issue_date: DateTime
-  issue_date_in_millis: EpochTime
-  issued_to: string
-  issuer: string
-  max_nodes: long | null
-  max_resource_units?: integer | null
-  status: LicenseLicenseStatus
-  type: LicenseLicenseType
-  uid: Uuid
-  start_date_in_millis: EpochTime
-}
-
-export interface LicenseGetRequest extends RequestBase {
-  accept_enterprise?: boolean
-  local?: boolean
-}
-
-export interface LicenseGetResponse {
-  license: LicenseGetLicenseInformation
-}
-
-export interface LicenseGetBasicStatusRequest extends RequestBase {
-}
-
-export interface LicenseGetBasicStatusResponse {
-  eligible_to_start_basic: boolean
-}
-
-export interface LicenseGetTrialStatusRequest extends RequestBase {
-}
-
-export interface LicenseGetTrialStatusResponse {
-  eligible_to_start_trial: boolean
-}
-
-export interface LicensePostAcknowledgement {
-  license: string[]
-  message: string
-}
-
-export interface LicensePostRequest extends RequestBase {
-  acknowledge?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    license?: LicenseLicense
-    licenses?: LicenseLicense[]
-  }
-}
-
-export interface LicensePostResponse {
-  acknowledge?: LicensePostAcknowledgement
-  acknowledged: boolean
-  license_status: LicenseLicenseStatus
-}
-
-export interface LicensePostStartBasicRequest extends RequestBase {
-  acknowledge?: boolean
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface LicensePostStartBasicResponse {
-  acknowledged: boolean
-  basic_was_started: boolean
-  error_message?: string
-  type?: LicenseLicenseType
-  acknowledge?: Record
-}
-
-export interface LicensePostStartTrialRequest extends RequestBase {
-  acknowledge?: boolean
-  type?: string
-  master_timeout?: Duration
-}
-
-export interface LicensePostStartTrialResponse {
-  acknowledged: boolean
-  error_message?: string
-  trial_was_started: boolean
-  type?: LicenseLicenseType
-}
-
-export interface LogstashPipeline {
-  description: string
-  last_modified: DateTime
-  pipeline: string
-  pipeline_metadata: LogstashPipelineMetadata
-  pipeline_settings: LogstashPipelineSettings
-  username: string
-}
-
-export interface LogstashPipelineMetadata {
-  type: string
-  version: string
-}
-
-export interface LogstashPipelineSettings {
-  'pipeline.workers': integer
-  'pipeline.batch.size': integer
-  'pipeline.batch.delay': integer
-  'queue.type': string
-  'queue.max_bytes': string
-  'queue.checkpoint.writes': integer
-}
-
-export interface LogstashDeletePipelineRequest extends RequestBase {
-  id: Id
-}
-
-export type LogstashDeletePipelineResponse = boolean
-
-export interface LogstashGetPipelineRequest extends RequestBase {
-  id?: Ids
-}
-
-export type LogstashGetPipelineResponse = Record
-
-export interface LogstashPutPipelineRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, use 'pipeline' instead. */
-  body?: LogstashPipeline
-}
-
-export type LogstashPutPipelineResponse = boolean
-
-export interface MigrationDeprecationsDeprecation {
-  details?: string
-  level: MigrationDeprecationsDeprecationLevel
-  message: string
-  url: string
-  resolve_during_rolling_upgrade: boolean
-  _meta?: Record
-}
-
-export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical'
-
-export interface MigrationDeprecationsRequest extends RequestBase {
-  index?: IndexName
-}
-
-export interface MigrationDeprecationsResponse {
-  cluster_settings: MigrationDeprecationsDeprecation[]
-  index_settings: Record
-  data_streams: Record
-  node_settings: MigrationDeprecationsDeprecation[]
-  ml_settings: MigrationDeprecationsDeprecation[]
-  templates: Record
-  ilm_policies: Record
-}
-
-export interface MigrationGetFeatureUpgradeStatusMigrationFeature {
-  feature_name: string
-  minimum_index_version: VersionString
-  migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus
-  indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[]
-}
-
-export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo {
-  index: IndexName
-  version: VersionString
-  failure_cause?: ErrorCause
-}
-
-export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR'
-
-export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase {
-}
-
-export interface MigrationGetFeatureUpgradeStatusResponse {
-  features: MigrationGetFeatureUpgradeStatusMigrationFeature[]
-  migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus
-}
-
-export interface MigrationPostFeatureUpgradeMigrationFeature {
-  feature_name: string
-}
-
-export interface MigrationPostFeatureUpgradeRequest extends RequestBase {
-}
-
-export interface MigrationPostFeatureUpgradeResponse {
-  accepted: boolean
-  features?: MigrationPostFeatureUpgradeMigrationFeature[]
-  reason?: string
-}
-
-export interface MlAdaptiveAllocationsSettings {
-  enabled: boolean
-  min_number_of_allocations?: integer
-  max_number_of_allocations?: integer
-}
-
-export interface MlAnalysisConfig {
-  bucket_span?: Duration
-  categorization_analyzer?: MlCategorizationAnalyzer
-  categorization_field_name?: Field
-  categorization_filters?: string[]
-  detectors: MlDetector[]
-  influencers?: Field[]
-  latency?: Duration
-  model_prune_window?: Duration
-  multivariate_by_fields?: boolean
-  per_partition_categorization?: MlPerPartitionCategorization
-  summary_count_field_name?: Field
-}
-
-export interface MlAnalysisConfigRead {
-  bucket_span: Duration
-  categorization_analyzer?: MlCategorizationAnalyzer
-  categorization_field_name?: Field
-  categorization_filters?: string[]
-  detectors: MlDetectorRead[]
-  influencers: Field[]
-  model_prune_window?: Duration
-  latency?: Duration
-  multivariate_by_fields?: boolean
-  per_partition_categorization?: MlPerPartitionCategorization
-  summary_count_field_name?: Field
-}
-
-export interface MlAnalysisLimits {
-  categorization_examples_limit?: long
-  model_memory_limit?: ByteSize
-}
-
-export interface MlAnalysisMemoryLimit {
-  model_memory_limit: string
-}
-
-export interface MlAnomaly {
-  actual?: double[]
-  anomaly_score_explanation?: MlAnomalyExplanation
-  bucket_span: DurationValue
-  by_field_name?: string
-  by_field_value?: string
-  causes?: MlAnomalyCause[]
-  detector_index: integer
-  field_name?: string
-  function?: string
-  function_description?: string
-  geo_results?: MlGeoResults
-  influencers?: MlInfluence[]
-  initial_record_score: double
-  is_interim: boolean
-  job_id: string
-  over_field_name?: string
-  over_field_value?: string
-  partition_field_name?: string
-  partition_field_value?: string
-  probability: double
-  record_score: double
-  result_type: string
-  timestamp: EpochTime
-  typical?: double[]
-}
-
-export interface MlAnomalyCause {
-  actual?: double[]
-  by_field_name?: Name
-  by_field_value?: string
-  correlated_by_field_value?: string
-  field_name?: Field
-  function?: string
-  function_description?: string
-  geo_results?: MlGeoResults
-  influencers?: MlInfluence[]
-  over_field_name?: Name
-  over_field_value?: string
-  partition_field_name?: string
-  partition_field_value?: string
-  probability: double
-  typical?: double[]
-}
-
-export interface MlAnomalyExplanation {
-  anomaly_characteristics_impact?: integer
-  anomaly_length?: integer
-  anomaly_type?: string
-  high_variance_penalty?: boolean
-  incomplete_bucket_penalty?: boolean
-  lower_confidence_bound?: double
-  multi_bucket_impact?: integer
-  single_bucket_impact?: integer
-  typical_value?: double
-  upper_confidence_bound?: double
-}
-
-export interface MlApiKeyAuthorization {
-  id: string
-  name: string
-}
-
-export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time'
-
-export interface MlBucketInfluencer {
-  anomaly_score: double
-  bucket_span: DurationValue
-  influencer_field_name: Field
-  initial_anomaly_score: double
-  is_interim: boolean
-  job_id: Id
-  probability: double
-  raw_anomaly_score: double
-  result_type: string
-  timestamp: EpochTime
-  timestamp_string?: DateTime
-}
-
-export interface MlBucketSummary {
-  anomaly_score: double
-  bucket_influencers: MlBucketInfluencer[]
-  bucket_span: DurationValue
-  event_count: long
-  initial_anomaly_score: double
-  is_interim: boolean
-  job_id: Id
-  processing_time_ms: DurationValue
-  result_type: string
-  timestamp: EpochTime
-  timestamp_string?: DateTime
-}
-
-export interface MlCalendarEvent {
-  calendar_id?: Id
-  event_id?: Id
-  description: string
-  end_time: DateTime
-  start_time: DateTime
-}
-
-export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition
-
-export interface MlCategorizationAnalyzerDefinition {
-  char_filter?: AnalysisCharFilter[]
-  filter?: AnalysisTokenFilter[]
-  tokenizer?: AnalysisTokenizer
-}
-
-export type MlCategorizationStatus = 'ok' | 'warn'
-
-export interface MlCategory {
-  category_id: ulong
-  examples: string[]
-  grok_pattern?: GrokPattern
-  job_id: Id
-  max_matching_length: ulong
-  partition_field_name?: string
-  partition_field_value?: string
-  regex: string
-  terms: string
-  num_matches?: long
-  preferred_to_categories?: Id[]
-  p?: string
-  result_type: string
-  mlcategory: string
-}
-
-export interface MlChunkingConfig {
-  mode: MlChunkingMode
-  time_span?: Duration
-}
-
-export type MlChunkingMode = 'auto' | 'manual' | 'off'
-
-export interface MlClassificationInferenceOptions {
-  num_top_classes?: integer
-  num_top_feature_importance_values?: integer
-  prediction_field_type?: string
-  results_field?: string
-  top_classes_results_field?: string
-}
-
-export interface MlCommonTokenizationConfig {
-  do_lower_case?: boolean
-  max_sequence_length?: integer
-  span?: integer
-  truncate?: MlTokenizationTruncate
-  with_special_tokens?: boolean
-}
-
-export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte'
-
-export type MlCustomSettings = any
-
-export interface MlDataCounts {
-  bucket_count: long
-  earliest_record_timestamp?: long
-  empty_bucket_count: long
-  input_bytes: long
-  input_field_count: long
-  input_record_count: long
-  invalid_date_count: long
-  job_id: Id
-  last_data_time?: long
-  latest_empty_bucket_timestamp?: long
-  latest_record_timestamp?: long
-  latest_sparse_bucket_timestamp?: long
-  latest_bucket_timestamp?: long
-  log_time?: long
-  missing_field_count: long
-  out_of_order_timestamp_count: long
-  processed_field_count: long
-  processed_record_count: long
-  sparse_bucket_count: long
-}
-
-export interface MlDataDescription {
-  format?: string
-  time_field?: Field
-  time_format?: string
-  field_delimiter?: string
-}
-
-export interface MlDatafeed {
-  aggregations?: Record
-  aggs?: Record
-  authorization?: MlDatafeedAuthorization
-  chunking_config?: MlChunkingConfig
-  datafeed_id: Id
-  frequency?: Duration
-  indices: string[]
-  indexes?: string[]
-  job_id: Id
-  max_empty_searches?: integer
-  query: QueryDslQueryContainer
-  query_delay?: Duration
-  script_fields?: Record
-  scroll_size?: integer
-  delayed_data_check_config: MlDelayedDataCheckConfig
-  runtime_mappings?: MappingRuntimeFields
-  indices_options?: IndicesOptions
-}
-
-export interface MlDatafeedAuthorization {
-  api_key?: MlApiKeyAuthorization
-  roles?: string[]
-  service_account?: string
-}
-
-export interface MlDatafeedConfig {
-  aggregations?: Record
-  aggs?: Record
-  chunking_config?: MlChunkingConfig
-  datafeed_id?: Id
-  delayed_data_check_config?: MlDelayedDataCheckConfig
-  frequency?: Duration
-  indices?: Indices
-  indexes?: Indices
-  indices_options?: IndicesOptions
-  job_id?: Id
-  max_empty_searches?: integer
-  query?: QueryDslQueryContainer
-  query_delay?: Duration
-  runtime_mappings?: MappingRuntimeFields
-  script_fields?: Record
-  scroll_size?: integer
-}
-
-export interface MlDatafeedRunningState {
-  real_time_configured: boolean
-  real_time_running: boolean
-  search_interval?: MlRunningStateSearchInterval
-}
-
-export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping'
-
-export interface MlDatafeedStats {
-  assignment_explanation?: string
-  datafeed_id: Id
-  node?: MlDiscoveryNodeCompact
-  state: MlDatafeedState
-  timing_stats?: MlDatafeedTimingStats
-  running_state?: MlDatafeedRunningState
-}
-
-export interface MlDatafeedTimingStats {
-  bucket_count: long
-  exponential_average_search_time_per_hour_ms: DurationValue
-  exponential_average_calculation_context?: MlExponentialAverageCalculationContext
-  job_id: Id
-  search_count: long
-  total_search_time_ms: DurationValue
-  average_search_time_per_bucket_ms?: DurationValue
-}
-
-export interface MlDataframeAnalysis {
-  alpha?: double
-  dependent_variable: string
-  downsample_factor?: double
-  early_stopping_enabled?: boolean
-  eta?: double
-  eta_growth_rate_per_tree?: double
-  feature_bag_fraction?: double
-  feature_processors?: MlDataframeAnalysisFeatureProcessor[]
-  gamma?: double
-  lambda?: double
-  max_optimization_rounds_per_hyperparameter?: integer
-  max_trees?: integer
-  maximum_number_trees?: integer
-  num_top_feature_importance_values?: integer
-  prediction_field_name?: Field
-  randomize_seed?: double
-  soft_tree_depth_limit?: integer
-  soft_tree_depth_tolerance?: double
-  training_percent?: Percentage
-}
-
-export interface MlDataframeAnalysisAnalyzedFields {
-  includes: string[]
-  excludes: string[]
-}
-
-export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis {
-  class_assignment_objective?: string
-  num_top_classes?: integer
-}
-
-export interface MlDataframeAnalysisContainer {
-  classification?: MlDataframeAnalysisClassification
-  outlier_detection?: MlDataframeAnalysisOutlierDetection
-  regression?: MlDataframeAnalysisRegression
-}
-
-export interface MlDataframeAnalysisFeatureProcessor {
-  frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding
-  multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding
-  n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding
-  one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding
-  target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding
-}
-
-export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding {
-  feature_name: Name
-  field: Field
-  frequency_map: Record
-}
-
-export interface MlDataframeAnalysisFeatureProcessorMultiEncoding {
-  processors: integer[]
-}
-
-export interface MlDataframeAnalysisFeatureProcessorNGramEncoding {
-  feature_prefix?: string
-  field: Field
-  length?: integer
-  n_grams: integer[]
-  start?: integer
-  custom?: boolean
-}
-
-export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding {
-  field: Field
-  hot_map: string
-}
-
-export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding {
-  default_value: integer
-  feature_name: Name
-  field: Field
-  target_map: Record
-}
-
-export interface MlDataframeAnalysisOutlierDetection {
-  compute_feature_influence?: boolean
-  feature_influence_threshold?: double
-  method?: string
-  n_neighbors?: integer
-  outlier_fraction?: double
-  standardization_enabled?: boolean
-}
-
-export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis {
-  loss_function?: string
-  loss_function_parameter?: double
-}
-
-export interface MlDataframeAnalytics {
-  analysis_stats?: MlDataframeAnalyticsStatsContainer
-  assignment_explanation?: string
-  data_counts: MlDataframeAnalyticsStatsDataCounts
-  id: Id
-  memory_usage: MlDataframeAnalyticsStatsMemoryUsage
-  node?: NodeAttributes
-  progress: MlDataframeAnalyticsStatsProgress[]
-  state: MlDataframeState
-}
-
-export interface MlDataframeAnalyticsAuthorization {
-  api_key?: MlApiKeyAuthorization
-  roles?: string[]
-  service_account?: string
-}
-
-export interface MlDataframeAnalyticsDestination {
-  index: IndexName
-  results_field?: Field
-}
-
-export interface MlDataframeAnalyticsFieldSelection {
-  is_included: boolean
-  is_required: boolean
-  feature_type?: string
-  mapping_types: string[]
-  name: Field
-  reason?: string
-}
-
-export interface MlDataframeAnalyticsMemoryEstimation {
-  expected_memory_with_disk: string
-  expected_memory_without_disk: string
-}
-
-export interface MlDataframeAnalyticsSource {
-  index: Indices
-  query?: QueryDslQueryContainer
-  runtime_mappings?: MappingRuntimeFields
-  _source?: MlDataframeAnalysisAnalyzedFields | string[]
-}
-
-export interface MlDataframeAnalyticsStatsContainer {
-  classification_stats?: MlDataframeAnalyticsStatsHyperparameters
-  outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection
-  regression_stats?: MlDataframeAnalyticsStatsHyperparameters
-}
-
-export interface MlDataframeAnalyticsStatsDataCounts {
-  skipped_docs_count: integer
-  test_docs_count: integer
-  training_docs_count: integer
-}
-
-export interface MlDataframeAnalyticsStatsHyperparameters {
-  hyperparameters: MlHyperparameters
-  iteration: integer
-  timestamp: EpochTime
-  timing_stats: MlTimingStats
-  validation_loss: MlValidationLoss
-}
-
-export interface MlDataframeAnalyticsStatsMemoryUsage {
-  memory_reestimate_bytes?: long
-  peak_usage_bytes: long
-  status: string
-  timestamp?: EpochTime
-}
-
-export interface MlDataframeAnalyticsStatsOutlierDetection {
-  parameters: MlOutlierDetectionParameters
-  timestamp: EpochTime
-  timing_stats: MlTimingStats
-}
-
-export interface MlDataframeAnalyticsStatsProgress {
-  phase: string
-  progress_percent: integer
-}
-
-export interface MlDataframeAnalyticsSummary {
-  allow_lazy_start?: boolean
-  analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-  authorization?: MlDataframeAnalyticsAuthorization
-  create_time?: EpochTime
-  description?: string
-  dest: MlDataframeAnalyticsDestination
-  id: Id
-  max_num_threads?: integer
-  model_memory_limit?: string
-  source: MlDataframeAnalyticsSource
-  version?: VersionString
-  _meta?: Metadata
-}
-
-export interface MlDataframeEvaluationClassification {
-  actual_field: Field
-  predicted_field?: Field
-  top_classes_field?: Field
-  metrics?: MlDataframeEvaluationClassificationMetrics
-}
-
-export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics {
-  accuracy?: Record
-  multiclass_confusion_matrix?: Record
-}
-
-export interface MlDataframeEvaluationClassificationMetricsAucRoc {
-  class_name?: Name
-  include_curve?: boolean
-}
-
-export interface MlDataframeEvaluationContainer {
-  classification?: MlDataframeEvaluationClassification
-  outlier_detection?: MlDataframeEvaluationOutlierDetection
-  regression?: MlDataframeEvaluationRegression
-}
-
-export interface MlDataframeEvaluationMetrics {
-  auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc
-  precision?: Record
-  recall?: Record
-}
-
-export interface MlDataframeEvaluationOutlierDetection {
-  actual_field: Field
-  predicted_probability_field: Field
-  metrics?: MlDataframeEvaluationOutlierDetectionMetrics
-}
-
-export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics {
-  confusion_matrix?: Record
-}
-
-export interface MlDataframeEvaluationRegression {
-  actual_field: Field
-  predicted_field: Field
-  metrics?: MlDataframeEvaluationRegressionMetrics
-}
-
-export interface MlDataframeEvaluationRegressionMetrics {
-  mse?: Record
-  msle?: MlDataframeEvaluationRegressionMetricsMsle
-  huber?: MlDataframeEvaluationRegressionMetricsHuber
-  r_squared?: Record
-}
-
-export interface MlDataframeEvaluationRegressionMetricsHuber {
-  delta?: double
-}
-
-export interface MlDataframeEvaluationRegressionMetricsMsle {
-  offset?: double
-}
-
-export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed'
-
-export interface MlDelayedDataCheckConfig {
-  check_window?: Duration
-  enabled: boolean
-}
-
-export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated'
-
-export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed'
-
-export interface MlDetectionRule {
-  actions?: MlRuleAction[]
-  conditions?: MlRuleCondition[]
-  scope?: Record
-}
-
-export interface MlDetector {
-  by_field_name?: Field
-  custom_rules?: MlDetectionRule[]
-  detector_description?: string
-  detector_index?: integer
-  exclude_frequent?: MlExcludeFrequent
-  field_name?: Field
-  function?: string
-  over_field_name?: Field
-  partition_field_name?: Field
-  use_null?: boolean
-}
-
-export interface MlDetectorRead {
-  by_field_name?: Field
-  custom_rules?: MlDetectionRule[]
-  detector_description?: string
-  detector_index?: integer
-  exclude_frequent?: MlExcludeFrequent
-  field_name?: Field
-  function: string
-  over_field_name?: Field
-  partition_field_name?: Field
-  use_null?: boolean
-}
-
-export interface MlDetectorUpdate {
-  detector_index: integer
-  description?: string
-  custom_rules?: MlDetectionRule[]
-}
-
-export type MlDiscoveryNode = Partial>
-
-export interface MlDiscoveryNodeCompact {
-  name: Name
-  ephemeral_id: Id
-  id: Id
-  transport_address: TransportAddress
-  attributes: Record
-}
-
-export interface MlDiscoveryNodeContent {
-  name?: Name
-  ephemeral_id: Id
-  transport_address: TransportAddress
-  external_id: string
-  attributes: Record
-  roles: string[]
-  version: VersionString
-  min_index_version: integer
-  max_index_version: integer
-}
-
-export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over'
-
-export interface MlExponentialAverageCalculationContext {
-  incremental_metric_value_ms: DurationValue
-  latest_timestamp?: EpochTime
-  previous_exponential_average_ms?: DurationValue
-}
-
-export type MlFeatureExtractor = MlQueryFeatureExtractor
-
-export interface MlFillMaskInferenceOptions {
-  mask_token?: string
-  num_top_classes?: integer
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  vocabulary: MlVocabulary
-}
-
-export interface MlFillMaskInferenceUpdateOptions {
-  num_top_classes?: integer
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-}
-
-export interface MlFilter {
-  description?: string
-  filter_id: Id
-  items: string[]
-}
-
-export interface MlFilterRef {
-  filter_id: Id
-  filter_type?: MlFilterType
-}
-
-export type MlFilterType = 'include' | 'exclude'
-
-export interface MlGeoResults {
-  actual_point?: string
-  typical_point?: string
-}
-
-export interface MlHyperparameter {
-  absolute_importance?: double
-  name: Name
-  relative_importance?: double
-  supplied: boolean
-  value: double
-}
-
-export interface MlHyperparameters {
-  alpha?: double
-  lambda?: double
-  gamma?: double
-  eta?: double
-  eta_growth_rate_per_tree?: double
-  feature_bag_fraction?: double
-  downsample_factor?: double
-  max_attempts_to_add_tree?: integer
-  max_optimization_rounds_per_hyperparameter?: integer
-  max_trees?: integer
-  num_folds?: integer
-  num_splits_per_feature?: integer
-  soft_tree_depth_limit?: integer
-  soft_tree_depth_tolerance?: double
-}
-
-export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status'
-
-export interface MlInferenceConfigCreateContainer {
-  regression?: MlRegressionInferenceOptions
-  classification?: MlClassificationInferenceOptions
-  text_classification?: MlTextClassificationInferenceOptions
-  zero_shot_classification?: MlZeroShotClassificationInferenceOptions
-  fill_mask?: MlFillMaskInferenceOptions
-  learning_to_rank?: MlLearningToRankConfig
-  ner?: MlNerInferenceOptions
-  pass_through?: MlPassThroughInferenceOptions
-  text_embedding?: MlTextEmbeddingInferenceOptions
-  text_expansion?: MlTextExpansionInferenceOptions
-  question_answering?: MlQuestionAnsweringInferenceOptions
-}
-
-export interface MlInferenceConfigUpdateContainer {
-  regression?: MlRegressionInferenceOptions
-  classification?: MlClassificationInferenceOptions
-  text_classification?: MlTextClassificationInferenceUpdateOptions
-  zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions
-  fill_mask?: MlFillMaskInferenceUpdateOptions
-  ner?: MlNerInferenceUpdateOptions
-  pass_through?: MlPassThroughInferenceUpdateOptions
-  text_embedding?: MlTextEmbeddingInferenceUpdateOptions
-  text_expansion?: MlTextExpansionInferenceUpdateOptions
-  question_answering?: MlQuestionAnsweringInferenceUpdateOptions
-}
-
-export interface MlInferenceResponseResult {
-  entities?: MlTrainedModelEntities[]
-  is_truncated?: boolean
-  predicted_value?: MlPredictedValue | MlPredictedValue[]
-  predicted_value_sequence?: string
-  prediction_probability?: double
-  prediction_score?: double
-  top_classes?: MlTopClassEntry[]
-  warning?: string
-  feature_importance?: MlTrainedModelInferenceFeatureImportance[]
-}
-
-export interface MlInfluence {
-  influencer_field_name: string
-  influencer_field_values: string[]
-}
-
-export interface MlInfluencer {
-  bucket_span: DurationValue
-  influencer_score: double
-  influencer_field_name: Field
-  influencer_field_value: string
-  initial_influencer_score: double
-  is_interim: boolean
-  job_id: Id
-  probability: double
-  result_type: string
-  timestamp: EpochTime
-  foo?: string
-}
-
-export interface MlJob {
-  allow_lazy_open: boolean
-  analysis_config: MlAnalysisConfig
-  analysis_limits?: MlAnalysisLimits
-  background_persist_interval?: Duration
-  blocked?: MlJobBlocked
-  create_time?: DateTime
-  custom_settings?: MlCustomSettings
-  daily_model_snapshot_retention_after_days?: long
-  data_description: MlDataDescription
-  datafeed_config?: MlDatafeed
-  deleting?: boolean
-  description?: string
-  finished_time?: DateTime
-  groups?: string[]
-  job_id: Id
-  job_type?: string
-  job_version?: VersionString
-  model_plot_config?: MlModelPlotConfig
-  model_snapshot_id?: Id
-  model_snapshot_retention_days: long
-  renormalization_window_days?: long
-  results_index_name: IndexName
-  results_retention_days?: long
-}
-
-export interface MlJobBlocked {
-  reason: MlJobBlockedReason
-  task_id?: TaskId
-}
-
-export type MlJobBlockedReason = 'delete' | 'reset' | 'revert'
-
-export interface MlJobConfig {
-  allow_lazy_open?: boolean
-  analysis_config: MlAnalysisConfig
-  analysis_limits?: MlAnalysisLimits
-  background_persist_interval?: Duration
-  custom_settings?: MlCustomSettings
-  daily_model_snapshot_retention_after_days?: long
-  data_description: MlDataDescription
-  datafeed_config?: MlDatafeedConfig
-  description?: string
-  groups?: string[]
-  job_id?: Id
-  job_type?: string
-  model_plot_config?: MlModelPlotConfig
-  model_snapshot_retention_days?: long
-  renormalization_window_days?: long
-  results_index_name?: IndexName
-  results_retention_days?: long
-}
-
-export interface MlJobForecastStatistics {
-  memory_bytes?: MlJobStatistics
-  processing_time_ms?: MlJobStatistics
-  records?: MlJobStatistics
-  status?: Record
-  total: long
-  forecasted_jobs: integer
-}
-
-export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening'
-
-export interface MlJobStatistics {
-  avg: double
-  max: double
-  min: double
-  total: double
-}
-
-export interface MlJobStats {
-  assignment_explanation?: string
-  data_counts: MlDataCounts
-  forecasts_stats: MlJobForecastStatistics
-  job_id: string
-  model_size_stats: MlModelSizeStats
-  node?: MlDiscoveryNodeCompact
-  open_time?: DateTime
-  state: MlJobState
-  timing_stats: MlJobTimingStats
-  deleting?: boolean
-}
-
-export interface MlJobTimingStats {
-  average_bucket_processing_time_ms?: DurationValue
-  bucket_count: long
-  exponential_average_bucket_processing_time_ms?: DurationValue
-  exponential_average_bucket_processing_time_per_hour_ms: DurationValue
-  job_id: Id
-  total_bucket_processing_time_ms: DurationValue
-  maximum_bucket_processing_time_ms?: DurationValue
-  minimum_bucket_processing_time_ms?: DurationValue
-}
-
-export interface MlLearningToRankConfig {
-  default_params?: Record
-  feature_extractors?: Record[]
-  num_top_feature_importance_values: integer
-}
-
-export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit'
-
-export interface MlModelPackageConfig {
-  create_time?: EpochTime
-  description?: string
-  inference_config?: Record
-  metadata?: Metadata
-  minimum_version?: string
-  model_repository?: string
-  model_type?: string
-  packaged_model_id: Id
-  platform_architecture?: string
-  prefix_strings?: MlTrainedModelPrefixStrings
-  size?: ByteSize
-  sha256?: string
-  tags?: string[]
-  vocabulary_file?: string
-}
-
-export interface MlModelPlotConfig {
-  annotations_enabled?: boolean
-  enabled?: boolean
-  terms?: Field
-}
-
-export interface MlModelSizeStats {
-  bucket_allocation_failures_count: long
-  job_id: Id
-  log_time: DateTime
-  memory_status: MlMemoryStatus
-  model_bytes: ByteSize
-  model_bytes_exceeded?: ByteSize
-  model_bytes_memory_limit?: ByteSize
-  output_memory_allocator_bytes?: ByteSize
-  peak_model_bytes?: ByteSize
-  assignment_memory_basis?: string
-  result_type: string
-  total_by_field_count: long
-  total_over_field_count: long
-  total_partition_field_count: long
-  categorization_status: MlCategorizationStatus
-  categorized_doc_count: integer
-  dead_category_count: integer
-  failed_category_count: integer
-  frequent_category_count: integer
-  rare_category_count: integer
-  total_category_count: integer
-  timestamp?: long
-}
-
-export interface MlModelSnapshot {
-  description?: string
-  job_id: Id
-  latest_record_time_stamp?: integer
-  latest_result_time_stamp?: integer
-  min_version: VersionString
-  model_size_stats?: MlModelSizeStats
-  retain: boolean
-  snapshot_doc_count: long
-  snapshot_id: Id
-  timestamp: long
-}
-
-export interface MlModelSnapshotUpgrade {
-  job_id: Id
-  snapshot_id: Id
-  state: MlSnapshotUpgradeState
-  node: MlDiscoveryNode
-  assignment_explanation: string
-}
-
-export interface MlNerInferenceOptions {
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  classification_labels?: string[]
-  vocabulary?: MlVocabulary
-}
-
-export interface MlNerInferenceUpdateOptions {
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-}
-
-export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig {
-}
-
-export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig {
-  add_prefix_space?: boolean
-}
-
-export interface MlNlpTokenizationUpdateOptions {
-  truncate?: MlTokenizationTruncate
-  span?: integer
-}
-
-export interface MlOutlierDetectionParameters {
-  compute_feature_influence?: boolean
-  feature_influence_threshold?: double
-  method?: string
-  n_neighbors?: integer
-  outlier_fraction?: double
-  standardization_enabled?: boolean
-}
-
-export interface MlOverallBucket {
-  bucket_span: DurationValue
-  is_interim: boolean
-  jobs: MlOverallBucketJob[]
-  overall_score: double
-  result_type: string
-  timestamp: EpochTime
-  timestamp_string?: DateTime
-}
-
-export interface MlOverallBucketJob {
-  job_id: Id
-  max_anomaly_score: double
-}
-
-export interface MlPage {
-  from?: integer
-  size?: integer
-}
-
-export interface MlPassThroughInferenceOptions {
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  vocabulary?: MlVocabulary
-}
-
-export interface MlPassThroughInferenceUpdateOptions {
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-}
-
-export interface MlPerPartitionCategorization {
-  enabled?: boolean
-  stop_on_warn?: boolean
-}
-
-export type MlPredictedValue = ScalarValue | ScalarValue[]
-
-export interface MlQueryFeatureExtractor {
-  default_score?: float
-  feature_name: string
-  query: QueryDslQueryContainer
-}
-
-export interface MlQuestionAnsweringInferenceOptions {
-  num_top_classes?: integer
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  max_answer_length?: integer
-}
-
-export interface MlQuestionAnsweringInferenceUpdateOptions {
-  question: string
-  num_top_classes?: integer
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-  max_answer_length?: integer
-}
-
-export interface MlRegressionInferenceOptions {
-  results_field?: Field
-  num_top_feature_importance_values?: integer
-}
-
-export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping'
-
-export type MlRuleAction = 'skip_result' | 'skip_model_update'
-
-export interface MlRuleCondition {
-  applies_to: MlAppliesTo
-  operator: MlConditionOperator
-  value: double
-}
-
-export interface MlRunningStateSearchInterval {
-  end?: Duration
-  end_ms: DurationValue
-  start?: Duration
-  start_ms: DurationValue
-}
-
-export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed'
-
-export interface MlTextClassificationInferenceOptions {
-  num_top_classes?: integer
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  classification_labels?: string[]
-  vocabulary?: MlVocabulary
-}
-
-export interface MlTextClassificationInferenceUpdateOptions {
-  num_top_classes?: integer
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-  classification_labels?: string[]
-}
-
-export interface MlTextEmbeddingInferenceOptions {
-  embedding_size?: integer
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  vocabulary: MlVocabulary
-}
-
-export interface MlTextEmbeddingInferenceUpdateOptions {
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-}
-
-export interface MlTextExpansionInferenceOptions {
-  tokenization?: MlTokenizationConfigContainer
-  results_field?: string
-  vocabulary: MlVocabulary
-}
-
-export interface MlTextExpansionInferenceUpdateOptions {
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-}
-
-export interface MlTimingStats {
-  elapsed_time: DurationValue
-  iteration_time?: DurationValue
-}
-
-export interface MlTokenizationConfigContainer {
-  bert?: MlNlpBertTokenizationConfig
-  bert_ja?: MlNlpBertTokenizationConfig
-  mpnet?: MlNlpBertTokenizationConfig
-  roberta?: MlNlpRobertaTokenizationConfig
-  xlm_roberta?: MlXlmRobertaTokenizationConfig
-}
-
-export type MlTokenizationTruncate = 'first' | 'second' | 'none'
-
-export interface MlTopClassEntry {
-  class_name: string
-  class_probability: double
-  class_score: double
-}
-
-export interface MlTotalFeatureImportance {
-  feature_name: Name
-  importance: MlTotalFeatureImportanceStatistics[]
-  classes: MlTotalFeatureImportanceClass[]
-}
-
-export interface MlTotalFeatureImportanceClass {
-  class_name: Name
-  importance: MlTotalFeatureImportanceStatistics[]
-}
-
-export interface MlTotalFeatureImportanceStatistics {
-  mean_magnitude: double
-  max: integer
-  min: integer
-}
-
-export interface MlTrainedModelAssignment {
-  adaptive_allocations?: MlAdaptiveAllocationsSettings | null
-  assignment_state: MlDeploymentAssignmentState
-  max_assigned_allocations?: integer
-  reason?: string
-  routing_table: Record
-  start_time: DateTime
-  task_parameters: MlTrainedModelAssignmentTaskParameters
-}
-
-export interface MlTrainedModelAssignmentRoutingStateAndReason {
-  reason?: string
-  routing_state: MlRoutingState
-}
-
-export interface MlTrainedModelAssignmentRoutingTable {
-  reason?: string
-  routing_state: MlRoutingState
-  current_allocations: integer
-  target_allocations: integer
-}
-
-export interface MlTrainedModelAssignmentTaskParameters {
-  model_bytes: ByteSize
-  model_id: Id
-  deployment_id: Id
-  cache_size?: ByteSize
-  number_of_allocations: integer
-  priority: MlTrainingPriority
-  per_deployment_memory_bytes: ByteSize
-  per_allocation_memory_bytes: ByteSize
-  queue_capacity: integer
-  threads_per_allocation: integer
-}
-
-export interface MlTrainedModelConfig {
-  model_id: Id
-  model_type?: MlTrainedModelType
-  tags: string[]
-  version?: VersionString
-  compressed_definition?: string
-  created_by?: string
-  create_time?: DateTime
-  default_field_map?: Record
-  description?: string
-  estimated_heap_memory_usage_bytes?: integer
-  estimated_operations?: integer
-  fully_defined?: boolean
-  inference_config?: MlInferenceConfigCreateContainer
-  input: MlTrainedModelConfigInput
-  license_level?: string
-  metadata?: MlTrainedModelConfigMetadata
-  model_size_bytes?: ByteSize
-  model_package?: MlModelPackageConfig
-  location?: MlTrainedModelLocation
-  platform_architecture?: string
-  prefix_strings?: MlTrainedModelPrefixStrings
-}
-
-export interface MlTrainedModelConfigInput {
-  field_names: Field[]
-}
-
-export interface MlTrainedModelConfigMetadata {
-  model_aliases?: string[]
-  feature_importance_baseline?: Record
-  hyperparameters?: MlHyperparameter[]
-  total_feature_importance?: MlTotalFeatureImportance[]
-}
-
-export interface MlTrainedModelDeploymentAllocationStatus {
-  allocation_count: integer
-  state: MlDeploymentAllocationState
-  target_allocation_count: integer
-}
-
-export interface MlTrainedModelDeploymentNodesStats {
-  average_inference_time_ms?: DurationValue
-  average_inference_time_ms_last_minute?: DurationValue
-  average_inference_time_ms_excluding_cache_hits?: DurationValue
-  error_count?: integer
-  inference_count?: long
-  inference_cache_hit_count?: long
-  inference_cache_hit_count_last_minute?: long
-  last_access?: EpochTime
-  node?: MlDiscoveryNode
-  number_of_allocations?: integer
-  number_of_pending_requests?: integer
-  peak_throughput_per_minute: long
-  rejected_execution_count?: integer
-  routing_state: MlTrainedModelAssignmentRoutingStateAndReason
-  start_time?: EpochTime
-  threads_per_allocation?: integer
-  throughput_last_minute: integer
-  timeout_count?: integer
-}
-
-export interface MlTrainedModelDeploymentStats {
-  adaptive_allocations?: MlAdaptiveAllocationsSettings
-  allocation_status?: MlTrainedModelDeploymentAllocationStatus
-  cache_size?: ByteSize
-  deployment_id: Id
-  error_count?: integer
-  inference_count?: integer
-  model_id: Id
-  nodes: MlTrainedModelDeploymentNodesStats[]
-  number_of_allocations?: integer
-  peak_throughput_per_minute: long
-  priority: MlTrainingPriority
-  queue_capacity?: integer
-  rejected_execution_count?: integer
-  reason?: string
-  start_time: EpochTime
-  state?: MlDeploymentAssignmentState
-  threads_per_allocation?: integer
-  timeout_count?: integer
-}
-
-export interface MlTrainedModelEntities {
-  class_name: string
-  class_probability: double
-  entity: string
-  start_pos: integer
-  end_pos: integer
-}
-
-export interface MlTrainedModelInferenceClassImportance {
-  class_name: string
-  importance: double
-}
-
-export interface MlTrainedModelInferenceFeatureImportance {
-  feature_name: string
-  importance?: double
-  classes?: MlTrainedModelInferenceClassImportance[]
-}
-
-export interface MlTrainedModelInferenceStats {
-  cache_miss_count: integer
-  failure_count: integer
-  inference_count: integer
-  missing_all_fields_count: integer
-  timestamp: EpochTime
-}
-
-export interface MlTrainedModelLocation {
-  index: MlTrainedModelLocationIndex
-}
-
-export interface MlTrainedModelLocationIndex {
-  name: IndexName
-}
-
-export interface MlTrainedModelPrefixStrings {
-  ingest?: string
-  search?: string
-}
-
-export interface MlTrainedModelSizeStats {
-  model_size_bytes: ByteSize
-  required_native_memory_bytes: ByteSize
-}
-
-export interface MlTrainedModelStats {
-  deployment_stats?: MlTrainedModelDeploymentStats
-  inference_stats?: MlTrainedModelInferenceStats
-  ingest?: Record
-  model_id: Id
-  model_size_stats: MlTrainedModelSizeStats
-  pipeline_count: integer
-}
-
-export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch'
-
-export type MlTrainingPriority = 'normal' | 'low'
-
-export interface MlTransformAuthorization {
-  api_key?: MlApiKeyAuthorization
-  roles?: string[]
-  service_account?: string
-}
-
-export interface MlValidationLoss {
-  fold_values: string[]
-  loss_type: string
-}
-
-export interface MlVocabulary {
-  index: IndexName
-}
-
-export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig {
-}
-
-export interface MlZeroShotClassificationInferenceOptions {
-  tokenization?: MlTokenizationConfigContainer
-  hypothesis_template?: string
-  classification_labels: string[]
-  results_field?: string
-  multi_label?: boolean
-  labels?: string[]
-}
-
-export interface MlZeroShotClassificationInferenceUpdateOptions {
-  tokenization?: MlNlpTokenizationUpdateOptions
-  results_field?: string
-  multi_label?: boolean
-  labels: string[]
-}
-
-export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase {
-  model_id: Id
-}
-
-export interface MlClearTrainedModelDeploymentCacheResponse {
-  cleared: boolean
-}
-
-export interface MlCloseJobRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_no_match?: boolean
-    force?: boolean
-    timeout?: Duration
-  }
-}
-
-export interface MlCloseJobResponse {
-  closed: boolean
-}
-
-export interface MlDeleteCalendarRequest extends RequestBase {
-  calendar_id: Id
-}
-
-export type MlDeleteCalendarResponse = AcknowledgedResponseBase
-
-export interface MlDeleteCalendarEventRequest extends RequestBase {
-  calendar_id: Id
-  event_id: Id
-}
-
-export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase
-
-export interface MlDeleteCalendarJobRequest extends RequestBase {
-  calendar_id: Id
-  job_id: Ids
-}
-
-export interface MlDeleteCalendarJobResponse {
-  calendar_id: Id
-  description?: string
-  job_ids: Ids
-}
-
-export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase {
-  id: Id
-  force?: boolean
-  timeout?: Duration
-}
-
-export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase
-
-export interface MlDeleteDatafeedRequest extends RequestBase {
-  datafeed_id: Id
-  force?: boolean
-}
-
-export type MlDeleteDatafeedResponse = AcknowledgedResponseBase
-
-export interface MlDeleteExpiredDataRequest extends RequestBase {
-  job_id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    requests_per_second?: float
-    timeout?: Duration
-  }
-}
-
-export interface MlDeleteExpiredDataResponse {
-  deleted: boolean
-}
-
-export interface MlDeleteFilterRequest extends RequestBase {
-  filter_id: Id
-}
-
-export type MlDeleteFilterResponse = AcknowledgedResponseBase
-
-export interface MlDeleteForecastRequest extends RequestBase {
-  job_id: Id
-  forecast_id?: Id
-  allow_no_forecasts?: boolean
-  timeout?: Duration
-}
-
-export type MlDeleteForecastResponse = AcknowledgedResponseBase
-
-export interface MlDeleteJobRequest extends RequestBase {
-  job_id: Id
-  force?: boolean
-  delete_user_annotations?: boolean
-  wait_for_completion?: boolean
-}
-
-export type MlDeleteJobResponse = AcknowledgedResponseBase
-
-export interface MlDeleteModelSnapshotRequest extends RequestBase {
-  job_id: Id
-  snapshot_id: Id
-}
-
-export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase
-
-export interface MlDeleteTrainedModelRequest extends RequestBase {
-  model_id: Id
-  force?: boolean
-  timeout?: Duration
-}
-
-export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase
-
-export interface MlDeleteTrainedModelAliasRequest extends RequestBase {
-  model_alias: Name
-  model_id: Id
-}
-
-export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase
-
-export interface MlEstimateModelMemoryRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    analysis_config?: MlAnalysisConfig
-    max_bucket_cardinality?: Record
-    overall_cardinality?: Record
-  }
-}
-
-export interface MlEstimateModelMemoryResponse {
-  model_memory_estimate: string
-}
-
-export interface MlEvaluateDataFrameConfusionMatrixItem {
-  actual_class: Name
-  actual_class_doc_count: integer
-  predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[]
-  other_predicted_class_doc_count: integer
-}
-
-export interface MlEvaluateDataFrameConfusionMatrixPrediction {
-  predicted_class: Name
-  count: integer
-}
-
-export interface MlEvaluateDataFrameConfusionMatrixThreshold {
-  tp: integer
-  fp: integer
-  tn: integer
-  fn: integer
-}
-
-export interface MlEvaluateDataFrameDataframeClassificationSummary {
-  auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc
-  accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy
-  multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix
-  precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision
-  recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall
-}
-
-export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy {
-  classes: MlEvaluateDataFrameDataframeEvaluationClass[]
-  overall_accuracy: double
-}
-
-export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix {
-  confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[]
-  other_actual_class_count: integer
-}
-
-export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision {
-  classes: MlEvaluateDataFrameDataframeEvaluationClass[]
-  avg_precision: double
-}
-
-export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall {
-  classes: MlEvaluateDataFrameDataframeEvaluationClass[]
-  avg_recall: double
-}
-
-export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue {
-  class_name: Name
-}
-
-export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue {
-  curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[]
-}
-
-export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem {
-  tpr: double
-  fpr: double
-  threshold: double
-}
-
-export interface MlEvaluateDataFrameDataframeEvaluationValue {
-  value: double
-}
-
-export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary {
-  auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc
-  precision?: Record
-  recall?: Record
-  confusion_matrix?: Record
-}
-
-export interface MlEvaluateDataFrameDataframeRegressionSummary {
-  huber?: MlEvaluateDataFrameDataframeEvaluationValue
-  mse?: MlEvaluateDataFrameDataframeEvaluationValue
-  msle?: MlEvaluateDataFrameDataframeEvaluationValue
-  r_squared?: MlEvaluateDataFrameDataframeEvaluationValue
-}
-
-export interface MlEvaluateDataFrameRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    evaluation: MlDataframeEvaluationContainer
-    index: IndexName
-    query?: QueryDslQueryContainer
-  }
-}
-
-export interface MlEvaluateDataFrameResponse {
-  classification?: MlEvaluateDataFrameDataframeClassificationSummary
-  outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary
-  regression?: MlEvaluateDataFrameDataframeRegressionSummary
-}
-
-export interface MlExplainDataFrameAnalyticsRequest extends RequestBase {
-  id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    source?: MlDataframeAnalyticsSource
-    dest?: MlDataframeAnalyticsDestination
-    analysis?: MlDataframeAnalysisContainer
-    description?: string
-    model_memory_limit?: string
-    max_num_threads?: integer
-    analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-    allow_lazy_start?: boolean
-  }
-}
-
-export interface MlExplainDataFrameAnalyticsResponse {
-  field_selection: MlDataframeAnalyticsFieldSelection[]
-  memory_estimation: MlDataframeAnalyticsMemoryEstimation
-}
-
-export interface MlFlushJobRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    advance_time?: DateTime
-    calc_interim?: boolean
-    end?: DateTime
-    skip_time?: DateTime
-    start?: DateTime
-  }
-}
-
-export interface MlFlushJobResponse {
-  flushed: boolean
-  last_finalized_bucket_end?: integer
-}
-
-export interface MlForecastRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    duration?: Duration
-    expires_in?: Duration
-    max_model_memory?: string
-  }
-}
-
-export interface MlForecastResponse {
-  acknowledged: boolean
-  forecast_id: Id
-}
-
-export interface MlGetBucketsRequest extends RequestBase {
-  job_id: Id
-  timestamp?: DateTime
-  from?: integer
-  size?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    anomaly_score?: double
-    desc?: boolean
-    end?: DateTime
-    exclude_interim?: boolean
-    expand?: boolean
-    page?: MlPage
-    sort?: Field
-    start?: DateTime
-  }
-}
-
-export interface MlGetBucketsResponse {
-  buckets: MlBucketSummary[]
-  count: long
-}
-
-export interface MlGetCalendarEventsRequest extends RequestBase {
-  calendar_id: Id
-  end?: DateTime
-  from?: integer
-  job_id?: Id
-  size?: integer
-  start?: DateTime
-}
-
-export interface MlGetCalendarEventsResponse {
-  count: long
-  events: MlCalendarEvent[]
-}
-
-export interface MlGetCalendarsCalendar {
-  calendar_id: Id
-  description?: string
-  job_ids: Id[]
-}
-
-export interface MlGetCalendarsRequest extends RequestBase {
-  calendar_id?: Id
-  from?: integer
-  size?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    page?: MlPage
-  }
-}
-
-export interface MlGetCalendarsResponse {
-  calendars: MlGetCalendarsCalendar[]
-  count: long
-}
-
-export interface MlGetCategoriesRequest extends RequestBase {
-  job_id: Id
-  category_id?: CategoryId
-  from?: integer
-  partition_field_value?: string
-  size?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    page?: MlPage
-  }
-}
-
-export interface MlGetCategoriesResponse {
-  categories: MlCategory[]
-  count: long
-}
-
-export interface MlGetDataFrameAnalyticsRequest extends RequestBase {
-  id?: Id
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-  exclude_generated?: boolean
-}
-
-export interface MlGetDataFrameAnalyticsResponse {
-  count: integer
-  data_frame_analytics: MlDataframeAnalyticsSummary[]
-}
-
-export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase {
-  id?: Id
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-  verbose?: boolean
-}
-
-export interface MlGetDataFrameAnalyticsStatsResponse {
-  count: long
-  data_frame_analytics: MlDataframeAnalytics[]
-}
-
-export interface MlGetDatafeedStatsRequest extends RequestBase {
-  datafeed_id?: Ids
-  allow_no_match?: boolean
-}
-
-export interface MlGetDatafeedStatsResponse {
-  count: long
-  datafeeds: MlDatafeedStats[]
-}
-
-export interface MlGetDatafeedsRequest extends RequestBase {
-  datafeed_id?: Ids
-  allow_no_match?: boolean
-  exclude_generated?: boolean
-}
-
-export interface MlGetDatafeedsResponse {
-  count: long
-  datafeeds: MlDatafeed[]
-}
-
-export interface MlGetFiltersRequest extends RequestBase {
-  filter_id?: Ids
-  from?: integer
-  size?: integer
-}
-
-export interface MlGetFiltersResponse {
-  count: long
-  filters: MlFilter[]
-}
-
-export interface MlGetInfluencersRequest extends RequestBase {
-  job_id: Id
-  desc?: boolean
-  end?: DateTime
-  exclude_interim?: boolean
-  influencer_score?: double
-  from?: integer
-  size?: integer
-  sort?: Field
-  start?: DateTime
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    page?: MlPage
-  }
-}
-
-export interface MlGetInfluencersResponse {
-  count: long
-  influencers: MlInfluencer[]
-}
-
-export interface MlGetJobStatsRequest extends RequestBase {
-  job_id?: Id
-  allow_no_match?: boolean
-}
-
-export interface MlGetJobStatsResponse {
-  count: long
-  jobs: MlJobStats[]
-}
-
-export interface MlGetJobsRequest extends RequestBase {
-  job_id?: Ids
-  allow_no_match?: boolean
-  exclude_generated?: boolean
-}
-
-export interface MlGetJobsResponse {
-  count: long
-  jobs: MlJob[]
-}
-
-export interface MlGetMemoryStatsJvmStats {
-  heap_max?: ByteSize
-  heap_max_in_bytes: integer
-  java_inference?: ByteSize
-  java_inference_in_bytes: integer
-  java_inference_max?: ByteSize
-  java_inference_max_in_bytes: integer
-}
-
-export interface MlGetMemoryStatsMemMlStats {
-  anomaly_detectors?: ByteSize
-  anomaly_detectors_in_bytes: integer
-  data_frame_analytics?: ByteSize
-  data_frame_analytics_in_bytes: integer
-  max?: ByteSize
-  max_in_bytes: integer
-  native_code_overhead?: ByteSize
-  native_code_overhead_in_bytes: integer
-  native_inference?: ByteSize
-  native_inference_in_bytes: integer
-}
-
-export interface MlGetMemoryStatsMemStats {
-  adjusted_total?: ByteSize
-  adjusted_total_in_bytes: integer
-  total?: ByteSize
-  total_in_bytes: integer
-  ml: MlGetMemoryStatsMemMlStats
-}
-
-export interface MlGetMemoryStatsMemory {
-  attributes: Record
-  jvm: MlGetMemoryStatsJvmStats
-  mem: MlGetMemoryStatsMemStats
-  name: Name
-  roles: string[]
-  transport_address: TransportAddress
-  ephemeral_id: Id
-}
-
-export interface MlGetMemoryStatsRequest extends RequestBase {
-  node_id?: Id
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface MlGetMemoryStatsResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase {
-  job_id: Id
-  snapshot_id: Id
-  allow_no_match?: boolean
-}
-
-export interface MlGetModelSnapshotUpgradeStatsResponse {
-  count: long
-  model_snapshot_upgrades: MlModelSnapshotUpgrade[]
-}
-
-export interface MlGetModelSnapshotsRequest extends RequestBase {
-  job_id: Id
-  snapshot_id?: Id
-  from?: integer
-  size?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    desc?: boolean
-    end?: DateTime
-    page?: MlPage
-    sort?: Field
-    start?: DateTime
-  }
-}
-
-export interface MlGetModelSnapshotsResponse {
-  count: long
-  model_snapshots: MlModelSnapshot[]
-}
-
-export interface MlGetOverallBucketsRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_no_match?: boolean
-    bucket_span?: Duration
-    end?: DateTime
-    exclude_interim?: boolean
-    overall_score?: double | string
-    start?: DateTime
-    top_n?: integer
-  }
-}
-
-export interface MlGetOverallBucketsResponse {
-  count: long
-  overall_buckets: MlOverallBucket[]
-}
-
-export interface MlGetRecordsRequest extends RequestBase {
-  job_id: Id
-  from?: integer
-  size?: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    desc?: boolean
-    end?: DateTime
-    exclude_interim?: boolean
-    page?: MlPage
-    record_score?: double
-    sort?: Field
-    start?: DateTime
-  }
-}
-
-export interface MlGetRecordsResponse {
-  count: long
-  records: MlAnomaly[]
-}
-
-export interface MlGetTrainedModelsRequest extends RequestBase {
-  model_id?: Ids
-  allow_no_match?: boolean
-  decompress_definition?: boolean
-  exclude_generated?: boolean
-  from?: integer
-  include?: MlInclude
-  include_model_definition?: boolean
-  size?: integer
-  tags?: string | string[]
-}
-
-export interface MlGetTrainedModelsResponse {
-  count: integer
-  trained_model_configs: MlTrainedModelConfig[]
-}
-
-export interface MlGetTrainedModelsStatsRequest extends RequestBase {
-  model_id?: Ids
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-}
-
-export interface MlGetTrainedModelsStatsResponse {
-  count: integer
-  trained_model_stats: MlTrainedModelStats[]
-}
-
-export interface MlInferTrainedModelRequest extends RequestBase {
-  model_id: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs: Record[]
-    inference_config?: MlInferenceConfigUpdateContainer
-  }
-}
-
-export interface MlInferTrainedModelResponse {
-  inference_results: MlInferenceResponseResult[]
-}
-
-export interface MlInfoAnomalyDetectors {
-  categorization_analyzer: MlCategorizationAnalyzer
-  categorization_examples_limit: integer
-  model_memory_limit: string
-  model_snapshot_retention_days: integer
-  daily_model_snapshot_retention_after_days: integer
-}
-
-export interface MlInfoDatafeeds {
-  scroll_size: integer
-}
-
-export interface MlInfoDefaults {
-  anomaly_detectors: MlInfoAnomalyDetectors
-  datafeeds: MlInfoDatafeeds
-}
-
-export interface MlInfoLimits {
-  max_single_ml_node_processors?: integer
-  total_ml_processors?: integer
-  max_model_memory_limit?: ByteSize
-  effective_max_model_memory_limit?: ByteSize
-  total_ml_memory: ByteSize
-}
-
-export interface MlInfoNativeCode {
-  build_hash: string
-  version: VersionString
-}
-
-export interface MlInfoRequest extends RequestBase {
-}
-
-export interface MlInfoResponse {
-  defaults: MlInfoDefaults
-  limits: MlInfoLimits
-  upgrade_mode: boolean
-  native_code: MlInfoNativeCode
-}
-
-export interface MlOpenJobRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    timeout?: Duration
-  }
-}
-
-export interface MlOpenJobResponse {
-  opened: boolean
-  node: NodeId
-}
-
-export interface MlPostCalendarEventsRequest extends RequestBase {
-  calendar_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    events: MlCalendarEvent[]
-  }
-}
-
-export interface MlPostCalendarEventsResponse {
-  events: MlCalendarEvent[]
-}
-
-export interface MlPostDataRequest extends RequestBase {
-  job_id: Id
-  reset_end?: DateTime
-  reset_start?: DateTime
-  /** @deprecated The use of the 'body' key has been deprecated, use 'data' instead. */
-
-export interface MlPostDataRequest extends RequestBase {
-  job_id: Id
-  reset_end?: DateTime
-  reset_start?: DateTime
-  /** @deprecated The use of the 'body' key has been deprecated, use 'data' instead. */
-  body?: TData[]
-}
-
-export interface MlPostDataResponse {
-  job_id: Id
-  processed_record_count: long
-  processed_field_count: long
-  input_bytes: long
-  input_field_count: long
-  invalid_date_count: long
-  missing_field_count: long
-  out_of_order_timestamp_count: long
-  empty_bucket_count: long
-  sparse_bucket_count: long
-  bucket_count: long
-  earliest_record_timestamp?: EpochTime
-  latest_record_timestamp?: EpochTime
-  last_data_time?: EpochTime
-  latest_empty_bucket_timestamp?: EpochTime
-  latest_sparse_bucket_timestamp?: EpochTime
-  input_record_count: long
-  log_time?: EpochTime
-}
-
-export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig {
-  source: MlDataframeAnalyticsSource
-  analysis: MlDataframeAnalysisContainer
-  model_memory_limit?: string
-  max_num_threads?: integer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-}
-
-export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase {
-  id?: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig
-  }
-}
-
-export interface MlPreviewDataFrameAnalyticsResponse {
-  feature_values: Record[]
-}
-
-export interface MlPreviewDatafeedRequest extends RequestBase {
-  datafeed_id?: Id
-  start?: DateTime
-  end?: DateTime
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    datafeed_config?: MlDatafeedConfig
-    job_config?: MlJobConfig
-  }
-}
-
-export type MlPreviewDatafeedResponse = TDocument[]
-
-export interface MlPutCalendarRequest extends RequestBase {
-  calendar_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    job_ids?: Id[]
-    description?: string
-  }
-}
-
-export interface MlPutCalendarResponse {
-  calendar_id: Id
-  description?: string
-  job_ids: Ids
-}
-
-export interface MlPutCalendarJobRequest extends RequestBase {
-  calendar_id: Id
-  job_id: Ids
-}
-
-export interface MlPutCalendarJobResponse {
-  calendar_id: Id
-  description?: string
-  job_ids: Ids
-}
-
-export interface MlPutDataFrameAnalyticsRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_lazy_start?: boolean
-    analysis: MlDataframeAnalysisContainer
-    analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-    description?: string
-    dest: MlDataframeAnalyticsDestination
-    max_num_threads?: integer
-    _meta?: Metadata
-    model_memory_limit?: string
-    source: MlDataframeAnalyticsSource
-    headers?: HttpHeaders
-    version?: VersionString
-  }
-}
-
-export interface MlPutDataFrameAnalyticsResponse {
-  authorization?: MlDataframeAnalyticsAuthorization
-  allow_lazy_start: boolean
-  analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-  create_time: EpochTime
-  description?: string
-  dest: MlDataframeAnalyticsDestination
-  id: Id
-  max_num_threads: integer
-  _meta?: Metadata
-  model_memory_limit: string
-  source: MlDataframeAnalyticsSource
-  version: VersionString
-}
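`MlPutCalendarRequest` and `MlPutCalendarJobRequest` above are small enough to sketch whole; the ids are hypothetical:

```ts
// Create a calendar, then attach an anomaly detection job to it
const putCalendar = {
  calendar_id: 'planned-outages',
  body: { description: 'Scheduled maintenance windows', job_ids: ['my-anomaly-job'] }
}
const putCalendarJob = { calendar_id: 'planned-outages', job_id: 'my-anomaly-job' }
```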
-
-export interface MlPutDatafeedRequest extends RequestBase {
-  datafeed_id: Id
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    chunking_config?: MlChunkingConfig
-    delayed_data_check_config?: MlDelayedDataCheckConfig
-    frequency?: Duration
-    indices?: Indices
-    /** @alias indices */
-    indexes?: Indices
-    indices_options?: IndicesOptions
-    job_id?: Id
-    max_empty_searches?: integer
-    query?: QueryDslQueryContainer
-    query_delay?: Duration
-    runtime_mappings?: MappingRuntimeFields
-    script_fields?: Record
-    scroll_size?: integer
-    headers?: HttpHeaders
-  }
-}
-
-export interface MlPutDatafeedResponse {
-  aggregations?: Record
-  authorization?: MlDatafeedAuthorization
-  chunking_config: MlChunkingConfig
-  delayed_data_check_config?: MlDelayedDataCheckConfig
-  datafeed_id: Id
-  frequency?: Duration
-  indices: string[]
-  job_id: Id
-  indices_options?: IndicesOptions
-  max_empty_searches?: integer
-  query: QueryDslQueryContainer
-  query_delay: Duration
-  runtime_mappings?: MappingRuntimeFields
-  script_fields?: Record
-  scroll_size: integer
-}
-
-export interface MlPutFilterRequest extends RequestBase {
-  filter_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    items?: string[]
-  }
-}
-
-export interface MlPutFilterResponse {
-  description: string
-  filter_id: Id
-  items: string[]
-}
-
-export interface MlPutJobRequest extends RequestBase {
-  job_id: Id
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_lazy_open?: boolean
-    analysis_config: MlAnalysisConfig
-    analysis_limits?: MlAnalysisLimits
-    background_persist_interval?: Duration
-    custom_settings?: MlCustomSettings
-    daily_model_snapshot_retention_after_days?: long
-    data_description: MlDataDescription
-    datafeed_config?: MlDatafeedConfig
-    description?: string
-    job_id?: Id
-    groups?: string[]
-    model_plot_config?: MlModelPlotConfig
-    model_snapshot_retention_days?: long
-    renormalization_window_days?: long
-    results_index_name?: IndexName
-    results_retention_days?: long
-  }
-}
-
-export interface MlPutJobResponse {
-  allow_lazy_open: boolean
-  analysis_config: MlAnalysisConfigRead
-  analysis_limits: MlAnalysisLimits
-  background_persist_interval?: Duration
-  create_time: DateTime
-  custom_settings?: MlCustomSettings
-  daily_model_snapshot_retention_after_days: long
-  data_description: MlDataDescription
-  datafeed_config?: MlDatafeed
-  description?: string
-  groups?: string[]
-  job_id: Id
-  job_type: string
-  job_version: string
-  model_plot_config?: MlModelPlotConfig
-  model_snapshot_id?: Id
-  model_snapshot_retention_days: long
-  renormalization_window_days?: long
-  results_index_name: string
-  results_retention_days?: long
-}
-
-export interface MlPutTrainedModelAggregateOutput {
-  logistic_regression?: MlPutTrainedModelWeights
-  weighted_sum?: MlPutTrainedModelWeights
-  weighted_mode?: MlPutTrainedModelWeights
-  exponent?: MlPutTrainedModelWeights
-}
-
-export interface MlPutTrainedModelDefinition {
-  preprocessors?: MlPutTrainedModelPreprocessor[]
-  trained_model: MlPutTrainedModelTrainedModel
-}
-
-export interface MlPutTrainedModelEnsemble {
-  aggregate_output?: MlPutTrainedModelAggregateOutput
-  classification_labels?: string[]
-  feature_names?: string[]
-  target_type?: string
-  trained_models: MlPutTrainedModelTrainedModel[]
-}
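The `MlPutDatafeedRequest` body above mirrors the datafeed configuration: the indices to read, the query to apply, and paging via `scroll_size`. A minimal sketch (all names are hypothetical):

```ts
const putDatafeed = {
  datafeed_id: 'datafeed-my-anomaly-job',
  body: {
    job_id: 'my-anomaly-job',
    indices: ['metrics-*'],
    query: { match_all: {} }, // a QueryDslQueryContainer
    query_delay: '60s',
    scroll_size: 1000
  }
}
```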
-
-export interface MlPutTrainedModelFrequencyEncodingPreprocessor {
-  field: string
-  feature_name: string
-  frequency_map: Record
-}
-
-export interface MlPutTrainedModelInput {
-  field_names: Names
-}
-
-export interface MlPutTrainedModelOneHotEncodingPreprocessor {
-  field: string
-  hot_map: Record
-}
-
-export interface MlPutTrainedModelPreprocessor {
-  frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor
-  one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor
-  target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor
-}
-
-export interface MlPutTrainedModelRequest extends RequestBase {
-  model_id: Id
-  defer_definition_decompression?: boolean
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    compressed_definition?: string
-    definition?: MlPutTrainedModelDefinition
-    description?: string
-    inference_config?: MlInferenceConfigCreateContainer
-    input?: MlPutTrainedModelInput
-    metadata?: any
-    model_type?: MlTrainedModelType
-    model_size_bytes?: long
-    platform_architecture?: string
-    tags?: string[]
-    prefix_strings?: MlTrainedModelPrefixStrings
-  }
-}
-
-export type MlPutTrainedModelResponse = MlTrainedModelConfig
-
-export interface MlPutTrainedModelTargetMeanEncodingPreprocessor {
-  field: string
-  feature_name: string
-  target_map: Record
-  default_value: double
-}
-
-export interface MlPutTrainedModelTrainedModel {
-  tree?: MlPutTrainedModelTrainedModelTree
-  tree_node?: MlPutTrainedModelTrainedModelTreeNode
-  ensemble?: MlPutTrainedModelEnsemble
-}
-
-export interface MlPutTrainedModelTrainedModelTree {
-  classification_labels?: string[]
-  feature_names: string[]
-  target_type?: string
-  tree_structure: MlPutTrainedModelTrainedModelTreeNode[]
-}
-
-export interface MlPutTrainedModelTrainedModelTreeNode {
-  decision_type?: string
-  default_left?: boolean
-  leaf_value?: double
-  left_child?: integer
-  node_index: integer
-  right_child?: integer
-  split_feature?: integer
-  split_gain?: integer
-  threshold?: double
-}
-
-export interface MlPutTrainedModelWeights {
-  weights: double
-}
-
-export interface MlPutTrainedModelAliasRequest extends RequestBase {
-  model_alias: Name
-  model_id: Id
-  reassign?: boolean
-}
-
-export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase
-
-export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase {
-  model_id: Id
-  part: integer
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    definition: string
-    total_definition_length: long
-    total_parts: integer
-  }
-}
-
-export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase
-
-export interface MlPutTrainedModelVocabularyRequest extends RequestBase {
-  model_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    vocabulary: string[]
-    merges?: string[]
-    scores?: double[]
-  }
-}
-
-export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase
-
-export interface MlResetJobRequest extends RequestBase {
-  job_id: Id
-  wait_for_completion?: boolean
-  delete_user_annotations?: boolean
-}
-
-export type MlResetJobResponse = AcknowledgedResponseBase
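`MlPutTrainedModelTrainedModelTree` encodes a decision tree as a flat `tree_structure` array whose internal nodes reference their children by `node_index`. A sketch of a one-split regression stump (the feature name and threshold are invented):

```ts
const tree = {
  feature_names: ['response_time'],
  target_type: 'regression',
  tree_structure: [
    // Root node: split on feature 0 at threshold 500; children are addressed by index
    { node_index: 0, split_feature: 0, threshold: 500, default_left: true, left_child: 1, right_child: 2 },
    { node_index: 1, leaf_value: 0.1 }, // response_time <= 500
    { node_index: 2, leaf_value: 0.9 }  // response_time > 500
  ]
}
```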
-
-export interface MlRevertModelSnapshotRequest extends RequestBase {
-  job_id: Id
-  snapshot_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    delete_intervening_results?: boolean
-  }
-}
-
-export interface MlRevertModelSnapshotResponse {
-  model: MlModelSnapshot
-}
-
-export interface MlSetUpgradeModeRequest extends RequestBase {
-  enabled?: boolean
-  timeout?: Duration
-}
-
-export type MlSetUpgradeModeResponse = AcknowledgedResponseBase
-
-export interface MlStartDataFrameAnalyticsRequest extends RequestBase {
-  id: Id
-  timeout?: Duration
-}
-
-export interface MlStartDataFrameAnalyticsResponse {
-  acknowledged: boolean
-  node: NodeId
-}
-
-export interface MlStartDatafeedRequest extends RequestBase {
-  datafeed_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    end?: DateTime
-    start?: DateTime
-    timeout?: Duration
-  }
-}
-
-export interface MlStartDatafeedResponse {
-  node: NodeIds
-  started: boolean
-}
-
-export interface MlStartTrainedModelDeploymentRequest extends RequestBase {
-  model_id: Id
-  cache_size?: ByteSize
-  deployment_id?: string
-  number_of_allocations?: integer
-  priority?: MlTrainingPriority
-  queue_capacity?: integer
-  threads_per_allocation?: integer
-  timeout?: Duration
-  wait_for?: MlDeploymentAllocationState
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    adaptive_allocations?: MlAdaptiveAllocationsSettings
-  }
-}
-
-export interface MlStartTrainedModelDeploymentResponse {
-  assignment: MlTrainedModelAssignment
-}
-
-export interface MlStopDataFrameAnalyticsRequest extends RequestBase {
-  id: Id
-  allow_no_match?: boolean
-  force?: boolean
-  timeout?: Duration
-}
-
-export interface MlStopDataFrameAnalyticsResponse {
-  stopped: boolean
-}
-
-export interface MlStopDatafeedRequest extends RequestBase {
-  datafeed_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_no_match?: boolean
-    force?: boolean
-    timeout?: Duration
-  }
-}
-
-export interface MlStopDatafeedResponse {
-  stopped: boolean
-}
-
-export interface MlStopTrainedModelDeploymentRequest extends RequestBase {
-  model_id: Id
-  allow_no_match?: boolean
-  force?: boolean
-}
-
-export interface MlStopTrainedModelDeploymentResponse {
-  stopped: boolean
-}
-
-export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    model_memory_limit?: string
-    max_num_threads?: integer
-    allow_lazy_start?: boolean
-  }
-}
-
-export interface MlUpdateDataFrameAnalyticsResponse {
-  authorization?: MlDataframeAnalyticsAuthorization
-  allow_lazy_start: boolean
-  analysis: MlDataframeAnalysisContainer
-  analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[]
-  create_time: long
-  description?: string
-  dest: MlDataframeAnalyticsDestination
-  id: Id
-  max_num_threads: integer
-  model_memory_limit: string
-  source: MlDataframeAnalyticsSource
-  version: VersionString
-}
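In `MlStartTrainedModelDeploymentRequest` above, the sizing knobs (`number_of_allocations`, `threads_per_allocation`, `queue_capacity`) are plain request properties; only `adaptive_allocations` sits in the deprecated `body`. A sketch (the ids and enum values are assumptions):

```ts
const startDeployment = {
  model_id: 'my-ner-model',           // hypothetical model id
  deployment_id: 'my-ner-deployment', // hypothetical deployment id
  number_of_allocations: 2,
  threads_per_allocation: 1,
  priority: 'normal',                 // assumed MlTrainingPriority value
  wait_for: 'started'                 // assumed MlDeploymentAllocationState value
}
```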
-
-export interface MlUpdateDatafeedRequest extends RequestBase {
-  datafeed_id: Id
-  allow_no_indices?: boolean
-  expand_wildcards?: ExpandWildcards
-  ignore_throttled?: boolean
-  ignore_unavailable?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    chunking_config?: MlChunkingConfig
-    delayed_data_check_config?: MlDelayedDataCheckConfig
-    frequency?: Duration
-    indices?: string[]
-    /** @alias indices */
-    indexes?: string[]
-    indices_options?: IndicesOptions
-    job_id?: Id
-    max_empty_searches?: integer
-    query?: QueryDslQueryContainer
-    query_delay?: Duration
-    runtime_mappings?: MappingRuntimeFields
-    script_fields?: Record
-    scroll_size?: integer
-  }
-}
-
-export interface MlUpdateDatafeedResponse {
-  authorization?: MlDatafeedAuthorization
-  aggregations?: Record
-  chunking_config: MlChunkingConfig
-  delayed_data_check_config?: MlDelayedDataCheckConfig
-  datafeed_id: Id
-  frequency?: Duration
-  indices: string[]
-  indices_options?: IndicesOptions
-  job_id: Id
-  max_empty_searches?: integer
-  query: QueryDslQueryContainer
-  query_delay: Duration
-  runtime_mappings?: MappingRuntimeFields
-  script_fields?: Record
-  scroll_size: integer
-}
-
-export interface MlUpdateFilterRequest extends RequestBase {
-  filter_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    add_items?: string[]
-    description?: string
-    remove_items?: string[]
-  }
-}
-
-export interface MlUpdateFilterResponse {
-  description: string
-  filter_id: Id
-  items: string[]
-}
-
-export interface MlUpdateJobRequest extends RequestBase {
-  job_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_lazy_open?: boolean
-    analysis_limits?: MlAnalysisMemoryLimit
-    background_persist_interval?: Duration
-    custom_settings?: Record
-    categorization_filters?: string[]
-    description?: string
-    model_plot_config?: MlModelPlotConfig
-    model_prune_window?: Duration
-    daily_model_snapshot_retention_after_days?: long
-    model_snapshot_retention_days?: long
-    renormalization_window_days?: long
-    results_retention_days?: long
-    groups?: string[]
-    detectors?: MlDetectorUpdate[]
-    per_partition_categorization?: MlPerPartitionCategorization
-  }
-}
-
-export interface MlUpdateJobResponse {
-  allow_lazy_open: boolean
-  analysis_config: MlAnalysisConfigRead
-  analysis_limits: MlAnalysisLimits
-  background_persist_interval?: Duration
-  create_time: EpochTime
-  finished_time?: EpochTime
-  custom_settings?: Record
-  daily_model_snapshot_retention_after_days: long
-  data_description: MlDataDescription
-  datafeed_config?: MlDatafeed
-  description?: string
-  groups?: string[]
-  job_id: Id
-  job_type: string
-  job_version: VersionString
-  model_plot_config?: MlModelPlotConfig
-  model_snapshot_id?: Id
-  model_snapshot_retention_days: long
-  renormalization_window_days?: long
-  results_index_name: IndexName
-  results_retention_days?: long
-}
-
-export interface MlUpdateModelSnapshotRequest extends RequestBase {
-  job_id: Id
-  snapshot_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    description?: string
-    retain?: boolean
-  }
-}
-
-export interface MlUpdateModelSnapshotResponse {
-  acknowledged: boolean
-  model: MlModelSnapshot
-}
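`MlUpdateFilterRequest` is a pure delta against an existing filter: `add_items` and `remove_items`. A sketch (the filter id and items are hypothetical):

```ts
const updateFilter = {
  filter_id: 'safe-domains',
  body: {
    description: 'Domains the detector should ignore',
    add_items: ['*.elastic.co'],
    remove_items: ['www.example.com']
  }
}
```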
-
-export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase {
-  model_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    number_of_allocations?: integer
-    adaptive_allocations?: MlAdaptiveAllocationsSettings
-  }
-}
-
-export interface MlUpdateTrainedModelDeploymentResponse {
-  assignment: MlTrainedModelAssignment
-}
-
-export interface MlUpgradeJobSnapshotRequest extends RequestBase {
-  job_id: Id
-  snapshot_id: Id
-  wait_for_completion?: boolean
-  timeout?: Duration
-}
-
-export interface MlUpgradeJobSnapshotResponse {
-  node: NodeId
-  completed: boolean
-}
-
-export interface MlValidateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    job_id?: Id
-    analysis_config?: MlAnalysisConfig
-    analysis_limits?: MlAnalysisLimits
-    data_description?: MlDataDescription
-    description?: string
-    model_plot?: MlModelPlotConfig
-    model_snapshot_id?: Id
-    model_snapshot_retention_days?: long
-    results_index_name?: IndexName
-  }
-}
-
-export type MlValidateResponse = AcknowledgedResponseBase
-
-export interface MlValidateDetectorRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, use 'detector' instead. */
-  body?: MlDetector
-}
-
-export type MlValidateDetectorResponse = AcknowledgedResponseBase
-
-export interface MonitoringBulkRequest extends RequestBase {
-  type?: string
-  system_id: string
-  system_api_version: string
-  interval: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, use 'operations' instead. */
-  body?: (BulkOperationContainer | BulkUpdateAction | TDocument)[]
-}
-
-export interface MonitoringBulkResponse {
-  error?: ErrorCause
-  errors: boolean
-  ignored: boolean
-  took: long
-}
-
-export interface NodesAdaptiveSelection {
-  avg_queue_size?: long
-  avg_response_time?: Duration
-  avg_response_time_ns?: long
-  avg_service_time?: Duration
-  avg_service_time_ns?: long
-  outgoing_searches?: long
-  rank?: string
-}
-
-export interface NodesBreaker {
-  estimated_size?: string
-  estimated_size_in_bytes?: long
-  limit_size?: string
-  limit_size_in_bytes?: long
-  overhead?: float
-  tripped?: float
-}
-
-export interface NodesCgroup {
-  cpuacct?: NodesCpuAcct
-  cpu?: NodesCgroupCpu
-  memory?: NodesCgroupMemory
-}
-
-export interface NodesCgroupCpu {
-  control_group?: string
-  cfs_period_micros?: integer
-  cfs_quota_micros?: integer
-  stat?: NodesCgroupCpuStat
-}
-
-export interface NodesCgroupCpuStat {
-  number_of_elapsed_periods?: long
-  number_of_times_throttled?: long
-  time_throttled_nanos?: DurationValue
-}
-
-export interface NodesCgroupMemory {
-  control_group?: string
-  limit_in_bytes?: string
-  usage_in_bytes?: string
-}
-
-export interface NodesClient {
-  id?: long
-  agent?: string
-  local_address?: string
-  remote_address?: string
-  last_uri?: string
-  opened_time_millis?: long
-  closed_time_millis?: long
-  last_request_time_millis?: long
-  request_count?: long
-  request_size_bytes?: long
-  x_opaque_id?: string
-}
-
-export interface NodesClusterAppliedStats {
-  recordings?: NodesRecording[]
-}
-
-export interface NodesClusterStateQueue {
-  total?: long
-  pending?: long
-  committed?: long
-}
-
-export interface NodesClusterStateUpdate {
-  count: long
-  computation_time?: Duration
-  computation_time_millis?: DurationValue
-  publication_time?: Duration
-  publication_time_millis?: DurationValue
-  context_construction_time?: Duration
-  context_construction_time_millis?: DurationValue
-  commit_time?: Duration
-  commit_time_millis?: DurationValue
-  completion_time?: Duration
-  completion_time_millis?: DurationValue
-  master_apply_time?: Duration
-  master_apply_time_millis?: DurationValue
-  notification_time?: Duration
-  notification_time_millis?: DurationValue
-}
-
-export interface NodesContext {
-  context?: string
-  compilations?: long
-  cache_evictions?: long
-  compilation_limit_triggered?: long
-}
-
-export interface NodesCpu {
-  percent?: integer
-  sys?: Duration
-  sys_in_millis?: DurationValue
-  total?: Duration
-  total_in_millis?: DurationValue
-  user?: Duration
-  user_in_millis?: DurationValue
-  load_average?: Record
-}
-
-export interface NodesCpuAcct {
-  control_group?: string
-  usage_nanos?: DurationValue
-}
-
-export interface NodesDataPathStats {
-  available?: string
-  available_in_bytes?: long
-  disk_queue?: string
-  disk_reads?: long
-  disk_read_size?: string
-  disk_read_size_in_bytes?: long
-  disk_writes?: long
-  disk_write_size?: string
-  disk_write_size_in_bytes?: long
-  free?: string
-  free_in_bytes?: long
-  mount?: string
-  path?: string
-  total?: string
-  total_in_bytes?: long
-  type?: string
-}
-
-export interface NodesDiscovery {
-  cluster_state_queue?: NodesClusterStateQueue
-  published_cluster_states?: NodesPublishedClusterStates
-  cluster_state_update?: Record
-  serialized_cluster_states?: NodesSerializedClusterState
-  cluster_applier_stats?: NodesClusterAppliedStats
-}
-
-export interface NodesExtendedMemoryStats extends NodesMemoryStats {
-  free_percent?: integer
-  used_percent?: integer
-}
-
-export interface NodesFileSystem {
-  data?: NodesDataPathStats[]
-  timestamp?: long
-  total?: NodesFileSystemTotal
-  io_stats?: NodesIoStats
-}
-
-export interface NodesFileSystemTotal {
-  available?: string
-  available_in_bytes?: long
-  free?: string
-  free_in_bytes?: long
-  total?: string
-  total_in_bytes?: long
-}
-
-export interface NodesGarbageCollector {
-  collectors?: Record
-}
-
-export interface NodesGarbageCollectorTotal {
-  collection_count?: long
-  collection_time?: string
-  collection_time_in_millis?: long
-}
-
-export interface NodesHttp {
-  current_open?: integer
-  total_opened?: long
-  clients?: NodesClient[]
-  routes: Record
-}
-
-export interface NodesHttpRoute {
-  requests: NodesHttpRouteRequests
-  responses: NodesHttpRouteResponses
-}
-
-export interface NodesHttpRouteRequests {
-  count: long
-  total_size_in_bytes: long
-  size_histogram: NodesSizeHttpHistogram[]
-}
-
-export interface NodesHttpRouteResponses {
-  count: long
-  total_size_in_bytes: long
-  handling_time_histogram: NodesTimeHttpHistogram[]
-  size_histogram: NodesSizeHttpHistogram[]
-}
-
-export interface NodesIndexingPressure {
-  memory?: NodesIndexingPressureMemory
-}
-
-export interface NodesIndexingPressureMemory {
-  limit?: ByteSize
-  limit_in_bytes?: long
-  current?: NodesPressureMemory
-  total?: NodesPressureMemory
-}
-
-export interface NodesIngest {
-  pipelines?: Record
-  total?: NodesIngestTotal
-}
-
-export interface NodesIngestStats {
-  count: long
-  current: long
-  failed: long
-  processors: Record[]
-  time_in_millis: DurationValue
-  ingested_as_first_pipeline_in_bytes: long
-  produced_as_first_pipeline_in_bytes: long
-}
-
-export interface NodesIngestTotal {
-  count: long
-  current: long
-  failed: long
-  time_in_millis: DurationValue
-}
-
-export interface NodesIoStatDevice {
-  device_name?: string
-  operations?: long
-  read_kilobytes?: long
-  read_operations?: long
-  write_kilobytes?: long
-  write_operations?: long
-}
-
-export interface NodesIoStats {
-  devices?: NodesIoStatDevice[]
-  total?: NodesIoStatDevice
-}
-
-export interface NodesJvm {
-  buffer_pools?: Record
-  classes?: NodesJvmClasses
-  gc?: NodesGarbageCollector
-  mem?: NodesJvmMemoryStats
-  threads?: NodesJvmThreads
-  timestamp?: long
-  uptime?: string
-  uptime_in_millis?: long
-}
-
-export interface NodesJvmClasses {
-  current_loaded_count?: long
-  total_loaded_count?: long
-  total_unloaded_count?: long
-}
-
-export interface NodesJvmMemoryStats {
-  heap_used_in_bytes?: long
-  heap_used_percent?: long
-  heap_committed_in_bytes?: long
-  heap_max_in_bytes?: long
-  heap_max?: ByteSize
-  non_heap_used_in_bytes?: long
-  non_heap_committed_in_bytes?: long
-  pools?: Record
-}
-
-export interface NodesJvmThreads {
-  count?: long
-  peak_count?: long
-}
-
-export interface NodesKeyedProcessor {
-  stats?: NodesProcessor
-  type?: string
-}
-
-export interface NodesMemoryStats {
-  adjusted_total_in_bytes?: long
-  resident?: string
-  resident_in_bytes?: long
-  share?: string
-  share_in_bytes?: long
-  total_virtual?: string
-  total_virtual_in_bytes?: long
-  total_in_bytes?: long
-  free_in_bytes?: long
-  used_in_bytes?: long
-}
-
-export interface NodesNodeBufferPool {
-  count?: long
-  total_capacity?: string
-  total_capacity_in_bytes?: long
-  used?: string
-  used_in_bytes?: long
-}
-
-export interface NodesNodeReloadResult {
-  name: Name
-  reload_exception?: ErrorCause
-}
-
-export interface NodesNodesResponseBase {
-  _nodes?: NodeStatistics
-}
-
-export interface NodesOperatingSystem {
-  cpu?: NodesCpu
-  mem?: NodesExtendedMemoryStats
-  swap?: NodesMemoryStats
-  cgroup?: NodesCgroup
-  timestamp?: long
-}
-
-export interface NodesPool {
-  used_in_bytes?: long
-  max_in_bytes?: long
-  peak_used_in_bytes?: long
-  peak_max_in_bytes?: long
-}
-
-export interface NodesPressureMemory {
-  all?: ByteSize
-  all_in_bytes?: long
-  combined_coordinating_and_primary?: ByteSize
-  combined_coordinating_and_primary_in_bytes?: long
-  coordinating?: ByteSize
-  coordinating_in_bytes?: long
-  primary?: ByteSize
-  primary_in_bytes?: long
-  replica?: ByteSize
-  replica_in_bytes?: long
-  coordinating_rejections?: long
-  primary_rejections?: long
-  replica_rejections?: long
-  primary_document_rejections?: long
-  large_operation_rejections?: long
-}
-
-export interface NodesProcess {
-  cpu?: NodesCpu
-  mem?: NodesMemoryStats
-  open_file_descriptors?: integer
-  max_file_descriptors?: integer
-  timestamp?: long
-}
-
-export interface NodesProcessor {
-  count?: long
-  current?: long
-  failed?: long
-  time_in_millis?: DurationValue
-}
-
-export interface NodesPublishedClusterStates {
-  full_states?: long
-  incompatible_diffs?: long
-  compatible_diffs?: long
-}
-
-export interface NodesRecording {
-  name?: string
-  cumulative_execution_count?: long
-  cumulative_execution_time?: Duration
-  cumulative_execution_time_millis?: DurationValue
-}
-
-export interface NodesRepositoryLocation {
-  base_path: string
-  container?: string
-  bucket?: string
-}
-
-export interface NodesRepositoryMeteringInformation {
-  repository_name: Name
-  repository_type: string
-  repository_location: NodesRepositoryLocation
-  repository_ephemeral_id: Id
-  repository_started_at: EpochTime
-  repository_stopped_at?: EpochTime
-  archived: boolean
-  cluster_version?: VersionNumber
-  request_counts: NodesRequestCounts
-}
-
-export interface NodesRequestCounts {
-  GetBlobProperties?: long
-  GetBlob?: long
-  ListBlobs?: long
-  PutBlob?: long
-  PutBlock?: long
-  PutBlockList?: long
-  GetObject?: long
-  ListObjects?: long
-  InsertObject?: long
-  PutObject?: long
-  PutMultipartObject?: long
-}
-
-export interface NodesScriptCache {
-  cache_evictions?: long
-  compilation_limit_triggered?: long
-  compilations?: long
-  context?: string
-}
-
-export interface NodesScripting {
-  cache_evictions?: long
-  compilations?: long
-  compilations_history?: Record
-  compilation_limit_triggered?: long
-  contexts?: NodesContext[]
-}
-
-export interface NodesSerializedClusterState {
-  full_states?: NodesSerializedClusterStateDetail
-  diffs?: NodesSerializedClusterStateDetail
-}
-
-export interface NodesSerializedClusterStateDetail {
-  count?: long
-  uncompressed_size?: string
-  uncompressed_size_in_bytes?: long
-  compressed_size?: string
-  compressed_size_in_bytes?: long
-}
-
-export interface NodesSizeHttpHistogram {
-  count: long
-  ge_bytes?: long
-  lt_bytes?: long
-}
-
-export interface NodesStats {
-  adaptive_selection?: Record
-  breakers?: Record
-  fs?: NodesFileSystem
-  host?: Host
-  http?: NodesHttp
-  ingest?: NodesIngest
-  ip?: Ip | Ip[]
-  jvm?: NodesJvm
-  name?: Name
-  os?: NodesOperatingSystem
-  process?: NodesProcess
-  roles?: NodeRoles
-  script?: NodesScripting
-  script_cache?: Record
-  thread_pool?: Record
-  timestamp?: long
-  transport?: NodesTransport
-  transport_address?: TransportAddress
-  attributes?: Record
-  discovery?: NodesDiscovery
-  indexing_pressure?: NodesIndexingPressure
-  indices?: IndicesStatsShardStats
-}
-
-export interface NodesThreadCount {
-  active?: long
-  completed?: long
-  largest?: long
-  queue?: long
-  rejected?: long
-  threads?: long
-}
-
-export interface NodesTimeHttpHistogram {
-  count: long
-  ge_millis?: long
-  lt_millis?: long
-}
-
-export interface NodesTransport {
-  inbound_handling_time_histogram?: NodesTransportHistogram[]
-  outbound_handling_time_histogram?: NodesTransportHistogram[]
-  rx_count?: long
-  rx_size?: string
-  rx_size_in_bytes?: long
-  server_open?: integer
-  tx_count?: long
-  tx_size?: string
-  tx_size_in_bytes?: long
-  total_outbound_connections?: long
-}
-
-export interface NodesTransportHistogram {
-  count?: long
-  lt_millis?: long
-  ge_millis?: long
-}
-
-export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase {
-  node_id: NodeIds
-  max_archive_version: long
-}
-
-export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase
-
-export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase {
-  node_id: NodeIds
-}
-
-export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase
-
-export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface NodesHotThreadsRequest extends RequestBase {
-  node_id?: NodeIds
-  ignore_idle_threads?: boolean
-  interval?: Duration
-  snapshots?: long
-  threads?: long
-  timeout?: Duration
-  type?: ThreadType
-  sort?: ThreadType
-}
-
-export interface NodesHotThreadsResponse {
-}
-
-export interface NodesInfoDeprecationIndexing {
-  enabled: boolean | string
-}
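Most of the `Nodes*` interfaces above describe the `nodes.stats` response, keyed by node id. A hedged consumer sketch, assuming an 8.x client (where API methods resolve to the response body) and that the elided `Record` types are keyed by node id:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // hypothetical endpoint

const stats = await client.nodes.stats()
for (const [nodeId, node] of Object.entries(stats.nodes)) {
  // NodesJvmMemoryStats: heap usage in bytes
  const heapUsed = node.jvm?.mem?.heap_used_in_bytes ?? 0
  const heapMax = node.jvm?.mem?.heap_max_in_bytes ?? 0
  // NodesTransport: cumulative bytes received over the transport layer
  const rx = node.transport?.rx_size_in_bytes ?? 0
  console.log(`${nodeId} (${node.name}): heap ${heapUsed}/${heapMax} B, transport rx ${rx} B`)
}
```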
-
-export interface NodesInfoNodeInfo {
-  attributes: Record
-  build_flavor: string
-  build_hash: string
-  build_type: string
-  component_versions: Record
-  host: Host
-  http?: NodesInfoNodeInfoHttp
-  index_version: VersionNumber
-  ip: Ip
-  jvm?: NodesInfoNodeJvmInfo
-  name: Name
-  os?: NodesInfoNodeOperatingSystemInfo
-  plugins?: PluginStats[]
-  process?: NodesInfoNodeProcessInfo
-  roles: NodeRoles
-  settings?: NodesInfoNodeInfoSettings
-  thread_pool?: Record
-  total_indexing_buffer?: long
-  total_indexing_buffer_in_bytes?: ByteSize
-  transport?: NodesInfoNodeInfoTransport
-  transport_address: TransportAddress
-  transport_version: VersionNumber
-  version: VersionString
-  modules?: PluginStats[]
-  ingest?: NodesInfoNodeInfoIngest
-  aggregations?: Record
-  remote_cluster_server?: NodesInfoRemoveClusterServer
-}
-
-export interface NodesInfoNodeInfoAction {
-  destructive_requires_name: string
-}
-
-export interface NodesInfoNodeInfoAggregation {
-  types: string[]
-}
-
-export interface NodesInfoNodeInfoBootstrap {
-  memory_lock: string
-}
-
-export interface NodesInfoNodeInfoClient {
-  type: string
-}
-
-export interface NodesInfoNodeInfoDiscoverKeys {
-  seed_hosts?: string[] | string
-  type?: string
-  seed_providers?: string[]
-}
-export type NodesInfoNodeInfoDiscover = NodesInfoNodeInfoDiscoverKeys
-& { [property: string]: any }
-
-export interface NodesInfoNodeInfoHttp {
-  bound_address: string[]
-  max_content_length?: ByteSize
-  max_content_length_in_bytes: long
-  publish_address: string
-}
-
-export interface NodesInfoNodeInfoIngest {
-  processors: NodesInfoNodeInfoIngestProcessor[]
-}
-
-export interface NodesInfoNodeInfoIngestDownloader {
-  enabled: string
-}
-
-export interface NodesInfoNodeInfoIngestInfo {
-  downloader: NodesInfoNodeInfoIngestDownloader
-}
-
-export interface NodesInfoNodeInfoIngestProcessor {
-  type: string
-}
-
-export interface NodesInfoNodeInfoJvmMemory {
-  direct_max?: ByteSize
-  direct_max_in_bytes: long
-  heap_init?: ByteSize
-  heap_init_in_bytes: long
-  heap_max?: ByteSize
-  heap_max_in_bytes: long
-  non_heap_init?: ByteSize
-  non_heap_init_in_bytes: long
-  non_heap_max?: ByteSize
-  non_heap_max_in_bytes: long
-}
-
-export interface NodesInfoNodeInfoMemory {
-  total: string
-  total_in_bytes: long
-}
-
-export interface NodesInfoNodeInfoOSCPU {
-  cache_size: string
-  cache_size_in_bytes: integer
-  cores_per_socket: integer
-  mhz: integer
-  model: string
-  total_cores: integer
-  total_sockets: integer
-  vendor: string
-}
-
-export interface NodesInfoNodeInfoPath {
-  logs?: string
-  home?: string
-  repo?: string[]
-  data?: string | string[]
-}
-
-export interface NodesInfoNodeInfoRepositories {
-  url: NodesInfoNodeInfoRepositoriesUrl
-}
-
-export interface NodesInfoNodeInfoRepositoriesUrl {
-  allowed_urls: string
-}
-
-export interface NodesInfoNodeInfoScript {
-  allowed_types: string
-  disable_max_compilations_rate?: string
-}
-
-export interface NodesInfoNodeInfoSearch {
-  remote: NodesInfoNodeInfoSearchRemote
-}
-
-export interface NodesInfoNodeInfoSearchRemote {
-  connect: string
-}
-
-export interface NodesInfoNodeInfoSettings {
-  cluster: NodesInfoNodeInfoSettingsCluster
-  node: NodesInfoNodeInfoSettingsNode
-  path?: NodesInfoNodeInfoPath
-  repositories?: NodesInfoNodeInfoRepositories
-  discovery?: NodesInfoNodeInfoDiscover
-  action?: NodesInfoNodeInfoAction
-  client?: NodesInfoNodeInfoClient
-  http: NodesInfoNodeInfoSettingsHttp
-  bootstrap?: NodesInfoNodeInfoBootstrap
-  transport: NodesInfoNodeInfoSettingsTransport
-  network?: NodesInfoNodeInfoSettingsNetwork
-  xpack?: NodesInfoNodeInfoXpack
-  script?: NodesInfoNodeInfoScript
-  search?: NodesInfoNodeInfoSearch
-  ingest?: NodesInfoNodeInfoSettingsIngest
-}
-
-export interface NodesInfoNodeInfoSettingsCluster {
-  name: Name
-  routing?: IndicesIndexRouting
-  election: NodesInfoNodeInfoSettingsClusterElection
-  initial_master_nodes?: string[] | string
-  deprecation_indexing?: NodesInfoDeprecationIndexing
-}
-
-export interface NodesInfoNodeInfoSettingsClusterElection {
-  strategy: Name
-}
-
-export interface NodesInfoNodeInfoSettingsHttp {
-  type: NodesInfoNodeInfoSettingsHttpType | string
-  'type.default'?: string
-  compression?: boolean | string
-  port?: integer | string
-}
-
-export interface NodesInfoNodeInfoSettingsHttpType {
-  default: string
-}
-
-export interface NodesInfoNodeInfoSettingsIngest {
-  attachment?: NodesInfoNodeInfoIngestInfo
-  append?: NodesInfoNodeInfoIngestInfo
-  csv?: NodesInfoNodeInfoIngestInfo
-  convert?: NodesInfoNodeInfoIngestInfo
-  date?: NodesInfoNodeInfoIngestInfo
-  date_index_name?: NodesInfoNodeInfoIngestInfo
-  dot_expander?: NodesInfoNodeInfoIngestInfo
-  enrich?: NodesInfoNodeInfoIngestInfo
-  fail?: NodesInfoNodeInfoIngestInfo
-  foreach?: NodesInfoNodeInfoIngestInfo
-  json?: NodesInfoNodeInfoIngestInfo
-  user_agent?: NodesInfoNodeInfoIngestInfo
-  kv?: NodesInfoNodeInfoIngestInfo
-  geoip?: NodesInfoNodeInfoIngestInfo
-  grok?: NodesInfoNodeInfoIngestInfo
-  gsub?: NodesInfoNodeInfoIngestInfo
-  join?: NodesInfoNodeInfoIngestInfo
-  lowercase?: NodesInfoNodeInfoIngestInfo
-  remove?: NodesInfoNodeInfoIngestInfo
-  rename?: NodesInfoNodeInfoIngestInfo
-  script?: NodesInfoNodeInfoIngestInfo
-  set?: NodesInfoNodeInfoIngestInfo
-  sort?: NodesInfoNodeInfoIngestInfo
-  split?: NodesInfoNodeInfoIngestInfo
-  trim?: NodesInfoNodeInfoIngestInfo
-  uppercase?: NodesInfoNodeInfoIngestInfo
-  urldecode?: NodesInfoNodeInfoIngestInfo
-  bytes?: NodesInfoNodeInfoIngestInfo
-  dissect?: NodesInfoNodeInfoIngestInfo
-  set_security_user?: NodesInfoNodeInfoIngestInfo
-  pipeline?: NodesInfoNodeInfoIngestInfo
-  drop?: NodesInfoNodeInfoIngestInfo
-  circle?: NodesInfoNodeInfoIngestInfo
-  inference?: NodesInfoNodeInfoIngestInfo
-}
-
-export interface NodesInfoNodeInfoSettingsNetwork {
-  host?: Host | Host[]
-}
-
-export interface NodesInfoNodeInfoSettingsNode {
-  name: Name
-  attr: Record
-  max_local_storage_nodes?: string
-}
-
-export interface NodesInfoNodeInfoSettingsTransport {
-  type: NodesInfoNodeInfoSettingsTransportType | string
-  'type.default'?: string
-  features?: NodesInfoNodeInfoSettingsTransportFeatures
-  ignore_deserialization_errors?: SpecUtilsStringified
-}
-
-export interface NodesInfoNodeInfoSettingsTransportFeatures {
-  'x-pack': string
-}
-
-export interface NodesInfoNodeInfoSettingsTransportType {
-  default: string
-}
-
-export interface NodesInfoNodeInfoTransport {
-  bound_address: string[]
-  publish_address: string
-  profiles: Record
-}
-
-export interface NodesInfoNodeInfoXpack {
-  license?: NodesInfoNodeInfoXpackLicense
-  security: NodesInfoNodeInfoXpackSecurity
-  notification?: Record
-  ml?: NodesInfoNodeInfoXpackMl
-}
-
-export interface NodesInfoNodeInfoXpackLicense {
-  self_generated: NodesInfoNodeInfoXpackLicenseType
-}
-
-export interface NodesInfoNodeInfoXpackLicenseType {
-  type: string
-}
-
-export interface NodesInfoNodeInfoXpackMl {
-  use_auto_machine_memory_percent?: boolean
-}
-
-export interface NodesInfoNodeInfoXpackSecurity {
-  http?: NodesInfoNodeInfoXpackSecuritySsl
-  enabled: string
-  transport?: NodesInfoNodeInfoXpackSecuritySsl
-  authc?: NodesInfoNodeInfoXpackSecurityAuthc
-}
-
-export interface NodesInfoNodeInfoXpackSecurityAuthc {
-  realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms
-  token?: NodesInfoNodeInfoXpackSecurityAuthcToken
-}
-
-export interface NodesInfoNodeInfoXpackSecurityAuthcRealms {
-  file?: Record
-  native?: Record
-  pki?: Record
-}
-
-export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus {
-  enabled?: string
-  order: string
-}
-
-export interface NodesInfoNodeInfoXpackSecurityAuthcToken {
-  enabled: string
-}
-
-export interface NodesInfoNodeInfoXpackSecuritySsl {
-  ssl: Record
-}
-
-export interface NodesInfoNodeJvmInfo {
-  gc_collectors: string[]
-  mem: NodesInfoNodeInfoJvmMemory
-  memory_pools: string[]
-  pid: integer
-  start_time_in_millis: EpochTime
-  version: VersionString
-  vm_name: Name
-  vm_vendor: string
-  vm_version: VersionString
-  using_bundled_jdk: boolean
-  using_compressed_ordinary_object_pointers?: boolean | string
-  input_arguments: string[]
-}
-
-export interface NodesInfoNodeOperatingSystemInfo {
-  arch: string
-  available_processors: integer
-  allocated_processors?: integer
-  name: Name
-  pretty_name: Name
-  refresh_interval_in_millis: DurationValue
-  version: VersionString
-  cpu?: NodesInfoNodeInfoOSCPU
-  mem?: NodesInfoNodeInfoMemory
-  swap?: NodesInfoNodeInfoMemory
-}
-
-export interface NodesInfoNodeProcessInfo {
-  id: long
-  mlockall: boolean
-  refresh_interval_in_millis: DurationValue
-}
-
-export interface NodesInfoNodeThreadPoolInfo {
-  core?: integer
-  keep_alive?: Duration
-  max?: integer
-  queue_size: integer
-  size?: integer
-  type: string
-}
-
-export interface NodesInfoRemoveClusterServer {
-  bound_address: TransportAddress[]
-  publish_address: TransportAddress
-}
-
-export interface NodesInfoRequest extends RequestBase {
-  node_id?: NodeIds
-  metric?: Metrics
-  flat_settings?: boolean
-  timeout?: Duration
-}
-
-export type NodesInfoResponse = NodesInfoResponseBase
-
-export interface NodesInfoResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  nodes: Record
-}
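`NodesInfoNodeInfo` and its satellite interfaces describe the `nodes.info` response. A sketch reading a few of the fields defined above (assumes a configured `client` as in the earlier snippet):

```ts
const info = await client.nodes.info()
for (const node of Object.values(info.nodes)) {
  // jvm (NodesInfoNodeJvmInfo) and os (NodesInfoNodeOperatingSystemInfo) are optional per node
  const jvm = node.jvm ? `${node.jvm.vm_name} ${node.jvm.version}` : 'unknown JVM'
  const os = node.os ? `${node.os.name} (${node.os.arch})` : 'unknown OS'
  console.log(`${node.name} ${node.version}: ${jvm}, ${os}, roles=${node.roles.join(',')}`)
}
```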
-
-export interface NodesReloadSecureSettingsRequest extends RequestBase {
-  node_id?: NodeIds
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    secure_settings_password?: Password
-  }
-}
-
-export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase
-
-export interface NodesReloadSecureSettingsResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface NodesStatsRequest extends RequestBase {
-  node_id?: NodeIds
-  metric?: Metrics
-  index_metric?: Metrics
-  completion_fields?: Fields
-  fielddata_fields?: Fields
-  fields?: Fields
-  groups?: boolean
-  include_segment_file_sizes?: boolean
-  level?: Level
-  timeout?: Duration
-  types?: string[]
-  include_unloaded_segments?: boolean
-}
-
-export type NodesStatsResponse = NodesStatsResponseBase
-
-export interface NodesStatsResponseBase extends NodesNodesResponseBase {
-  cluster_name?: Name
-  nodes: Record
-}
-
-export interface NodesUsageNodeUsage {
-  rest_actions: Record
-  since: EpochTime
-  timestamp: EpochTime
-  aggregations: Record
-}
-
-export interface NodesUsageRequest extends RequestBase {
-  node_id?: NodeIds
-  metric?: Metrics
-  timeout?: Duration
-}
-
-export type NodesUsageResponse = NodesUsageResponseBase
-
-export interface NodesUsageResponseBase extends NodesNodesResponseBase {
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface QueryRulesQueryRule {
-  rule_id: Id
-  type: QueryRulesQueryRuleType
-  criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[]
-  actions: QueryRulesQueryRuleActions
-  priority?: integer
-}
-
-export interface QueryRulesQueryRuleActions {
-  ids?: Id[]
-  docs?: QueryDslPinnedDoc[]
-}
-
-export interface QueryRulesQueryRuleCriteria {
-  type: QueryRulesQueryRuleCriteriaType
-  metadata?: string
-  values?: any[]
-}
-
-export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always'
-
-export type QueryRulesQueryRuleType = 'pinned' | 'exclude'
-
-export interface QueryRulesQueryRuleset {
-  ruleset_id: Id
-  rules: QueryRulesQueryRule[]
-}
-
-export interface QueryRulesDeleteRuleRequest extends RequestBase {
-  ruleset_id: Id
-  rule_id: Id
-}
-
-export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase
-
-export interface QueryRulesDeleteRulesetRequest extends RequestBase {
-  ruleset_id: Id
-}
-
-export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase
-
-export interface QueryRulesGetRuleRequest extends RequestBase {
-  ruleset_id: Id
-  rule_id: Id
-}
-
-export type QueryRulesGetRuleResponse = QueryRulesQueryRule
-
-export interface QueryRulesGetRulesetRequest extends RequestBase {
-  ruleset_id: Id
-}
-
-export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset
-
-export interface QueryRulesListRulesetsQueryRulesetListItem {
-  ruleset_id: Id
-  rule_total_count: integer
-  rule_criteria_types_counts: Record
-  rule_type_counts: Record
-}
-
-export interface QueryRulesListRulesetsRequest extends RequestBase {
-  from?: integer
-  size?: integer
-}
-
-export interface QueryRulesListRulesetsResponse {
-  count: long
-  results: QueryRulesListRulesetsQueryRulesetListItem[]
-}
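`QueryRulesQueryRule` composes a `type` (`pinned` or `exclude`), one or more `criteria`, and the `actions` applied when the criteria match. A sketch of a pinned rule (the ids and values are hypothetical):

```ts
const rule = {
  rule_id: 'promote-pricing-docs',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['pricing'] }],
  actions: { ids: ['doc-1', 'doc-2'] }, // pin these document ids to the top
  priority: 1
}
```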
-
-export interface QueryRulesPutRuleRequest extends RequestBase {
-  ruleset_id: Id
-  rule_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    type: QueryRulesQueryRuleType
-    criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[]
-    actions: QueryRulesQueryRuleActions
-    priority?: integer
-  }
-}
-
-export interface QueryRulesPutRuleResponse {
-  result: Result
-}
-
-export interface QueryRulesPutRulesetRequest extends RequestBase {
-  ruleset_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    rules: QueryRulesQueryRule | QueryRulesQueryRule[]
-  }
-}
-
-export interface QueryRulesPutRulesetResponse {
-  result: Result
-}
-
-export interface QueryRulesTestQueryRulesetMatchedRule {
-  ruleset_id: Id
-  rule_id: Id
-}
-
-export interface QueryRulesTestRequest extends RequestBase {
-  ruleset_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    match_criteria: Record
-  }
-}
-
-export interface QueryRulesTestResponse {
-  total_matched_rules: integer
-  matched_rules: QueryRulesTestQueryRulesetMatchedRule[]
-}
-
-export interface RollupDateHistogramGrouping {
-  delay?: Duration
-  field: Field
-  format?: string
-  interval?: Duration
-  calendar_interval?: Duration
-  fixed_interval?: Duration
-  time_zone?: TimeZone
-}
-
-export interface RollupFieldMetric {
-  field: Field
-  metrics: RollupMetric[]
-}
-
-export interface RollupGroupings {
-  date_histogram?: RollupDateHistogramGrouping
-  histogram?: RollupHistogramGrouping
-  terms?: RollupTermsGrouping
-}
-
-export interface RollupHistogramGrouping {
-  fields: Fields
-  interval: long
-}
-
-export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count'
-
-export interface RollupTermsGrouping {
-  fields: Fields
-}
-
-export interface RollupDeleteJobRequest extends RequestBase {
-  id: Id
-}
-
-export interface RollupDeleteJobResponse {
-  acknowledged: boolean
-  task_failures?: TaskFailure[]
-}
-
-export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting'
-
-export interface RollupGetJobsRequest extends RequestBase {
-  id?: Id
-}
-
-export interface RollupGetJobsResponse {
-  jobs: RollupGetJobsRollupJob[]
-}
-
-export interface RollupGetJobsRollupJob {
-  config: RollupGetJobsRollupJobConfiguration
-  stats: RollupGetJobsRollupJobStats
-  status: RollupGetJobsRollupJobStatus
-}
-
-export interface RollupGetJobsRollupJobConfiguration {
-  cron: string
-  groups: RollupGroupings
-  id: Id
-  index_pattern: string
-  metrics: RollupFieldMetric[]
-  page_size: long
-  rollup_index: IndexName
-  timeout: Duration
-}
-
-export interface RollupGetJobsRollupJobStats {
-  documents_processed: long
-  index_failures: long
-  index_time_in_ms: DurationValue
-  index_total: long
-  pages_processed: long
-  rollups_indexed: long
-  search_failures: long
-  search_time_in_ms: DurationValue
-  search_total: long
-  trigger_count: long
-  processing_time_in_ms: DurationValue
-  processing_total: long
-}
-
-export interface RollupGetJobsRollupJobStatus {
-  current_position?: Record
-  job_state: RollupGetJobsIndexingJobState
-  upgraded_doc_id?: boolean
-}
-
-export interface RollupGetRollupCapsRequest extends RequestBase {
-  id?: Id
-}
-
-export type RollupGetRollupCapsResponse = Record
-
-export interface RollupGetRollupCapsRollupCapabilities {
-  rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[]
-}
-
-export interface RollupGetRollupCapsRollupCapabilitySummary {
-  fields: Record
-  index_pattern: string
-  job_id: string
-  rollup_index: string
-}
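`RollupGroupings` above combines the three grouping kinds; in practice a rollup job always carries a `date_histogram` grouping, with `terms` and `histogram` optional. A config-fragment sketch (the field names are hypothetical):

```ts
const groups = {
  date_histogram: { field: '@timestamp', calendar_interval: '1h', time_zone: 'UTC' },
  terms: { fields: ['host.name'] },
  histogram: { fields: ['response_size'], interval: 1024 }
}
```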
-
-export interface RollupGetRollupCapsRollupFieldSummary {
-  agg: string
-  calendar_interval?: Duration
-  time_zone?: TimeZone
-}
-
-export interface RollupGetRollupIndexCapsIndexCapabilities {
-  rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[]
-}
-
-export interface RollupGetRollupIndexCapsRequest extends RequestBase {
-  index: Ids
-}
-
-export type RollupGetRollupIndexCapsResponse = Record
-
-export interface RollupGetRollupIndexCapsRollupJobSummary {
-  fields: Record
-  index_pattern: string
-  job_id: Id
-  rollup_index: IndexName
-}
-
-export interface RollupGetRollupIndexCapsRollupJobSummaryField {
-  agg: string
-  time_zone?: TimeZone
-  calendar_interval?: Duration
-}
-
-export interface RollupPutJobRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    cron: string
-    groups: RollupGroupings
-    index_pattern: string
-    metrics?: RollupFieldMetric[]
-    page_size: integer
-    rollup_index: IndexName
-    timeout?: Duration
-    headers?: HttpHeaders
-  }
-}
-
-export type RollupPutJobResponse = AcknowledgedResponseBase
-
-export interface RollupRollupSearchRequest extends RequestBase {
-  index: Indices
-  rest_total_hits_as_int?: boolean
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    query?: QueryDslQueryContainer
-    size?: integer
-  }
-}
-
-export interface RollupRollupSearchResponse> {
-  took: long
-  timed_out: boolean
-  terminated_early?: boolean
-  _shards: ShardStatistics
-  hits: SearchHitsMetadata
-  aggregations?: TAggregations
-}
-
-export interface RollupStartJobRequest extends RequestBase {
-  id: Id
-}
-
-export interface RollupStartJobResponse {
-  started: boolean
-}
-
-export interface RollupStopJobRequest extends RequestBase {
-  id: Id
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export interface RollupStopJobResponse {
-  stopped: boolean
-}
-
-export interface SearchApplicationAnalyticsCollection {
-  event_data_stream: SearchApplicationEventDataStream
-}
-
-export interface SearchApplicationEventDataStream {
-  name: IndexName
-}
-
-export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click'
-
-export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters {
-  name: Name
-  updated_at_millis: EpochTime
-}
-
-export interface SearchApplicationSearchApplicationParameters {
-  indices: IndexName[]
-  analytics_collection_name?: Name
-  template?: SearchApplicationSearchApplicationTemplate
-}
-
-export interface SearchApplicationSearchApplicationTemplate {
-  script: Script | string
-}
-
-export interface SearchApplicationDeleteRequest extends RequestBase {
-  name: Name
-}
-
-export type SearchApplicationDeleteResponse = AcknowledgedResponseBase
-
-export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase {
-  name: Name
-}
-
-export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase
-
-export interface SearchApplicationGetRequest extends RequestBase {
-  name: Name
-}
-
-export type SearchApplicationGetResponse = SearchApplicationSearchApplication
-
-export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase {
-  name?: Name[]
-}
-
-export type SearchApplicationGetBehavioralAnalyticsResponse = Record
-
-export interface SearchApplicationListRequest extends RequestBase {
-  q?: string
-  from?: integer
-  size?: integer
-}
-
-export interface SearchApplicationListResponse {
-  count: long
-  results: SearchApplicationSearchApplication[]
-}
-
-export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase {
-  collection_name: Name
-  event_type: SearchApplicationEventType
-  debug?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'payload' instead. */
-  body?: any
-}
-
-export interface SearchApplicationPostBehavioralAnalyticsEventResponse {
-  accepted: boolean
-  event?: any
-}
-
-export interface SearchApplicationPutRequest extends RequestBase {
-  name: Name
-  create?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'search_application' instead. */
-  body?: SearchApplicationSearchApplicationParameters
-}
-
-export interface SearchApplicationPutResponse {
-  result: Result
-}
-
-export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase {
-  name: Name
-}
-
-export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase {
-  name: Name
-}
-
-export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase
-
-export interface SearchApplicationRenderQueryRequest extends RequestBase {
-  name: Name
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    params?: Record
-  }
-}
-
-export interface SearchApplicationRenderQueryResponse {
-}
-
-export interface SearchApplicationSearchRequest extends RequestBase {
-  name: Name
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    params?: Record
-  }
-}
-
-export type SearchApplicationSearchResponse> = SearchResponseBody
-
-export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards'
-
-export interface SearchableSnapshotsCacheStatsNode {
-  shared_cache: SearchableSnapshotsCacheStatsShared
-}
-
-export interface SearchableSnapshotsCacheStatsRequest extends RequestBase {
-  node_id?: NodeIds
-  master_timeout?: Duration
-}
-
-export interface SearchableSnapshotsCacheStatsResponse {
-  nodes: Record
-}
-
-export interface SearchableSnapshotsCacheStatsShared {
-  reads: long
-  bytes_read_in_bytes: ByteSize
-  writes: long
-  bytes_written_in_bytes: ByteSize
-  evictions: long
-  num_regions: integer
-  size_in_bytes: ByteSize
-  region_size_in_bytes: ByteSize
-}
-
-export interface SearchableSnapshotsClearCacheRequest extends RequestBase {
-  index?: Indices
-  expand_wildcards?: ExpandWildcards
-  allow_no_indices?: boolean
-  ignore_unavailable?: boolean
-}
-
-export type SearchableSnapshotsClearCacheResponse = any
-
-export interface SearchableSnapshotsMountMountedSnapshot {
-  snapshot: Name
-  indices: Indices
-  shards: ShardStatistics
-}
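`SearchApplicationSearchRequest` above routes everything through template `params`; the keys are whatever the application's search template expects. A sketch (the application name and params are hypothetical):

```ts
const appSearch = {
  name: 'website-search',
  body: { params: { query_string: 'laptop', size: 10 } }
}
```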
-
-export interface SearchableSnapshotsMountRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-  storage?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    index: IndexName
-    renamed_index?: IndexName
-    index_settings?: Record
-    ignore_index_settings?: string[]
-  }
-}
-
-export interface SearchableSnapshotsMountResponse {
-  snapshot: SearchableSnapshotsMountMountedSnapshot
-}
-
-export interface SearchableSnapshotsStatsRequest extends RequestBase {
-  index?: Indices
-  level?: SearchableSnapshotsStatsLevel
-}
-
-export interface SearchableSnapshotsStatsResponse {
-  stats: any
-  total: any
-}
-
-export interface SecurityAccess {
-  replication?: SecurityReplicationAccess[]
-  search?: SecuritySearchAccess[]
-}
-
-export interface SecurityApiKey {
-  id: Id
-  name: Name
-  type: SecurityApiKeyType
-  creation: EpochTime
-  expiration?: EpochTime
-  invalidated: boolean
-  invalidation?: EpochTime
-  username: Username
-  realm: string
-  realm_type?: string
-  metadata: Metadata
-  role_descriptors?: Record
-  limited_by?: Record[]
-  access?: SecurityAccess
-  profile_uid?: string
-  _sort?: SortResults
-}
-
-export type SecurityApiKeyType = 'rest' | 'cross_cluster'
-
-export interface SecurityApplicationGlobalUserPrivileges {
-  manage: SecurityManageUserPrivileges
-}
-
-export interface SecurityApplicationPrivileges {
-  application: string
-  privileges: string[]
-  resources: string[]
-}
-
-export interface SecurityBulkError {
-  count: integer
-  details: Record
-}
-
-export interface SecurityClusterNode {
-  name: Name
-}
-
-export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string
-
-export interface SecurityCreatedStatus {
-  created: boolean
-}
-
-export interface SecurityFieldRule {
-  username?: Names
-  dn?: Names
-  groups?: Names
-}
-
-export interface SecurityFieldSecurity {
-  except?: Fields
-  grant?: Fields
-}
-
-export interface SecurityGlobalPrivilege {
-  application: SecurityApplicationGlobalUserPrivileges
-}
-
-export type SecurityGrantType = 'password' | 'access_token'
-
-export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string
-export interface SecurityIndicesPrivileges {
-  field_security?: SecurityFieldSecurity
-  names: IndexName[]
-  privileges: SecurityIndexPrivilege[]
-  query?: SecurityIndicesPrivilegesQuery
-  allow_restricted_indices?: boolean
-}
-
-export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery
-
-export interface SecurityManageUserPrivileges {
-  applications: string[]
-}
-
-export interface SecurityRealmInfo {
-  name: Name
-  type: string
-}
-
-export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats'
-
-export interface SecurityRemoteClusterPrivileges {
-  clusters: Names
-  privileges: SecurityRemoteClusterPrivilege[]
-}
-
-export interface SecurityRemoteIndicesPrivileges {
-  clusters: Names
-  field_security?: SecurityFieldSecurity
-  names: IndexName[]
-  privileges: SecurityIndexPrivilege[]
-  query?: SecurityIndicesPrivilegesQuery
-  allow_restricted_indices?: boolean
-}
-
-export interface SecurityRemoteUserIndicesPrivileges {
-  field_security?: SecurityFieldSecurity[]
-  names: IndexName | IndexName[]
-  privileges: SecurityIndexPrivilege[]
-  query?: SecurityIndicesPrivilegesQuery[]
-  allow_restricted_indices: boolean
-  clusters: string[]
-}
-
-export interface SecurityReplicationAccess {
-  names: IndexName | IndexName[]
-  allow_restricted_indices?: boolean
-}
-
-export interface SecurityRestriction {
-  workflows: SecurityRestrictionWorkflow[]
-}
-
-export type SecurityRestrictionWorkflow = 'search_application_query' | string
-
-export interface SecurityRoleDescriptor {
-  cluster?: SecurityClusterPrivilege[]
-  indices?: SecurityIndicesPrivileges[]
-  index?: SecurityIndicesPrivileges[]
-  remote_indices?: SecurityRemoteIndicesPrivileges[]
-  remote_cluster?: SecurityRemoteClusterPrivileges[]
-  global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
-  applications?: SecurityApplicationPrivileges[]
-  metadata?: Metadata
-  run_as?: string[]
-  description?: string
-  restriction?: SecurityRestriction
-  transient_metadata?: Record
-}
-
-export interface SecurityRoleDescriptorRead {
-  cluster: SecurityClusterPrivilege[]
-  indices: SecurityIndicesPrivileges[]
-  index: SecurityIndicesPrivileges[]
-  remote_indices?: SecurityRemoteIndicesPrivileges[]
-  remote_cluster?: SecurityRemoteClusterPrivileges[]
-  global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege
-  applications?: SecurityApplicationPrivileges[]
-  metadata?: Metadata
-  run_as?: string[]
-  description?: string
-  restriction?: SecurityRestriction
-  transient_metadata?: Record
-}
-
-export interface SecurityRoleMapping {
-  enabled: boolean
-  metadata: Metadata
-  roles?: string[]
-  role_templates?: SecurityRoleTemplate[]
-  rules: SecurityRoleMappingRule
-}
-
-export interface SecurityRoleMappingRule {
-  any?: SecurityRoleMappingRule[]
-  all?: SecurityRoleMappingRule[]
-  field?: SecurityFieldRule
-  except?: SecurityRoleMappingRule
-}
-
-export interface SecurityRoleTemplate {
-  format?: SecurityTemplateFormat
-  template: Script | string
-}
-
-export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer
-
-export interface SecurityRoleTemplateQuery {
-  template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery
-}
-
-export interface SecurityRoleTemplateScript {
-  source?: SecurityRoleTemplateInlineQuery
-  id?: Id
-  params?: Record
-  lang?: ScriptLanguage
-  options?: Record
-}
-
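// [Editor's sketch, not part of the diff] The role-mapping types above pair with
// the security.putRoleMapping API; a minimal field-rule example, assuming a
// configured `client` and hypothetical mapping/role/group names:
await client.security.putRoleMapping({
  name: 'saml-engineering',                    // hypothetical mapping name
  enabled: true,
  roles: ['logs-reader'],                      // hypothetical role
  rules: { field: { groups: 'engineering' } }  // SecurityRoleMappingRule with a SecurityFieldRule
})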
-export interface SecuritySearchAccess {
-  field_security?: SecurityFieldSecurity
-  names: IndexName | IndexName[]
-  query?: SecurityIndicesPrivilegesQuery
-  allow_restricted_indices?: boolean
-}
-
-export interface SecuritySecuritySettings {
-  index?: IndicesIndexSettings
-}
-
-export type SecurityTemplateFormat = 'string' | 'json'
-
-export interface SecurityUser {
-  email?: string | null
-  full_name?: Name | null
-  metadata: Metadata
-  roles: string[]
-  username: Username
-  enabled: boolean
-  profile_uid?: SecurityUserProfileId
-}
-
-export interface SecurityUserIndicesPrivileges {
-  field_security?: SecurityFieldSecurity[]
-  names: IndexName[]
-  privileges: SecurityIndexPrivilege[]
-  query?: SecurityIndicesPrivilegesQuery[]
-  allow_restricted_indices: boolean
-}
-
-export interface SecurityUserProfile {
-  uid: SecurityUserProfileId
-  user: SecurityUserProfileUser
-  data: Record
-  labels: Record
-  enabled?: boolean
-}
-
-export interface SecurityUserProfileHitMetadata {
-  _primary_term: long
-  _seq_no: SequenceNumber
-}
-
-export type SecurityUserProfileId = string
-
-export interface SecurityUserProfileUser {
-  email?: string | null
-  full_name?: Name | null
-  realm_name: Name
-  realm_domain?: Name
-  roles: string[]
-  username: Username
-}
-
-export interface SecurityUserProfileWithMetadata extends SecurityUserProfile {
-  last_synchronized: long
-  _doc: SecurityUserProfileHitMetadata
-}
-
-export interface SecurityActivateUserProfileRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    access_token?: string
-    grant_type: SecurityGrantType
-    password?: string
-    username?: string
-  }
-}
-
-export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata
-
-export interface SecurityAuthenticateAuthenticateApiKey {
-  id: Id
-  name?: Name
-}
-
-export interface SecurityAuthenticateRequest extends RequestBase {
-}
-
-export interface SecurityAuthenticateResponse {
-  api_key?: SecurityAuthenticateAuthenticateApiKey
-  authentication_realm: SecurityRealmInfo
-  email?: string | null
-  full_name?: Name | null
-  lookup_realm: SecurityRealmInfo
-  metadata: Metadata
-  roles: string[]
-  username: Username
-  enabled: boolean
-  authentication_type: string
-  token?: SecurityAuthenticateToken
-}
-
-export interface SecurityAuthenticateToken {
-  name: Name
-  type?: string
-}
-
-export interface SecurityBulkDeleteRoleRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    names: string[]
-  }
-}
-
-export interface SecurityBulkDeleteRoleResponse {
-  deleted?: string[]
-  not_found?: string[]
-  errors?: SecurityBulkError
-}
-
-export interface SecurityBulkPutRoleRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    roles: Record
-  }
-}
-
-export interface SecurityBulkPutRoleResponse {
-  created?: string[]
-  updated?: string[]
-  noop?: string[]
-  errors?: SecurityBulkError
-}
-
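// [Editor's sketch, not part of the diff] Creating roles in bulk with the types
// above, assuming a client version that exposes security.bulkPutRole and a
// hypothetical role name:
const bulk = await client.security.bulkPutRole({
  roles: {
    'logs-reader': {                  // hypothetical role; value is a SecurityRoleDescriptor
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
    }
  }
})
console.log(bulk.created, bulk.errors)  // SecurityBulkPutRoleResponse fields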
-export interface SecurityBulkUpdateApiKeysRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    expiration?: Duration
-    ids: string | string[]
-    metadata?: Metadata
-    role_descriptors?: Record
-  }
-}
-
-export interface SecurityBulkUpdateApiKeysResponse {
-  errors?: SecurityBulkError
-  noops: string[]
-  updated: string[]
-}
-
-export interface SecurityChangePasswordRequest extends RequestBase {
-  username?: Username
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    password?: Password
-    password_hash?: string
-  }
-}
-
-export interface SecurityChangePasswordResponse {
-}
-
-export interface SecurityClearApiKeyCacheRequest extends RequestBase {
-  ids: Ids
-}
-
-export interface SecurityClearApiKeyCacheResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedPrivilegesRequest extends RequestBase {
-  application: Name
-}
-
-export interface SecurityClearCachedPrivilegesResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedRealmsRequest extends RequestBase {
-  realms: Names
-  usernames?: string[]
-}
-
-export interface SecurityClearCachedRealmsResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedRolesRequest extends RequestBase {
-  name: Names
-}
-
-export interface SecurityClearCachedRolesResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityClearCachedServiceTokensRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name: Names
-}
-
-export interface SecurityClearCachedServiceTokensResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  nodes: Record
-}
-
-export interface SecurityCreateApiKeyRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    expiration?: Duration
-    name?: Name
-    role_descriptors?: Record
-    metadata?: Metadata
-  }
-}
-
-export interface SecurityCreateApiKeyResponse {
-  api_key: string
-  expiration?: long
-  id: Id
-  name: Name
-  encoded: string
-}
-
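// [Editor's sketch, not part of the diff] Minting an API key with the types
// above, assuming a configured `client`; key and role names are hypothetical:
const key = await client.security.createApiKey({
  name: 'ingest-service-key',         // hypothetical key name
  expiration: '30d',
  role_descriptors: {
    writer: { indices: [{ names: ['metrics-*'], privileges: ['create_doc'] }] }
  }
})
// key.encoded is ready for an `Authorization: ApiKey ...` header
console.log(key.id, key.encoded)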
-export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    access: SecurityAccess
-    expiration?: Duration
-    metadata?: Metadata
-    name: Name
-  }
-}
-
-export interface SecurityCreateCrossClusterApiKeyResponse {
-  api_key: string
-  expiration?: DurationValue
-  id: Id
-  name: Name
-  encoded: string
-}
-
-export interface SecurityCreateServiceTokenRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name?: Name
-  refresh?: Refresh
-}
-
-export interface SecurityCreateServiceTokenResponse {
-  created: boolean
-  token: SecurityCreateServiceTokenToken
-}
-
-export interface SecurityCreateServiceTokenToken {
-  name: Name
-  value: string
-}
-
-export interface SecurityDelegatePkiAuthentication {
-  username: string
-  roles: string[]
-  full_name: string | null
-  email: string | null
-  token?: Record
-  metadata: Metadata
-  enabled: boolean
-  authentication_realm: SecurityDelegatePkiAuthenticationRealm
-  lookup_realm: SecurityDelegatePkiAuthenticationRealm
-  authentication_type: string
-  api_key?: Record
-}
-
-export interface SecurityDelegatePkiAuthenticationRealm {
-  name: string
-  type: string
-  domain?: string
-}
-
-export interface SecurityDelegatePkiRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    x509_certificate_chain: string[]
-  }
-}
-
-export interface SecurityDelegatePkiResponse {
-  access_token: string
-  expires_in: long
-  type: string
-  authentication?: SecurityDelegatePkiAuthentication
-}
-
-export interface SecurityDeletePrivilegesFoundStatus {
-  found: boolean
-}
-
-export interface SecurityDeletePrivilegesRequest extends RequestBase {
-  application: Name
-  name: Names
-  refresh?: Refresh
-}
-
-export type SecurityDeletePrivilegesResponse = Record<string, Record<string, SecurityDeletePrivilegesFoundStatus>>
-
-export interface SecurityDeleteRoleRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteRoleResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteRoleMappingRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteRoleMappingResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteServiceTokenRequest extends RequestBase {
-  namespace: Namespace
-  service: Service
-  name: Name
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteServiceTokenResponse {
-  found: boolean
-}
-
-export interface SecurityDeleteUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityDeleteUserResponse {
-  found: boolean
-}
-
-export interface SecurityDisableUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityDisableUserResponse {
-}
-
-export interface SecurityDisableUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  refresh?: Refresh
-}
-
-export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase
-
-export interface SecurityEnableUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-}
-
-export interface SecurityEnableUserResponse {
-}
-
-export interface SecurityEnableUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  refresh?: Refresh
-}
-
-export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase
-
-export interface SecurityEnrollKibanaRequest extends RequestBase {
-}
-
-export interface SecurityEnrollKibanaResponse {
-  token: SecurityEnrollKibanaToken
-  http_ca: string
-}
-
-export interface SecurityEnrollKibanaToken {
-  name: string
-  value: string
-}
-
-export interface SecurityEnrollNodeRequest extends RequestBase {
-}
-
-export interface SecurityEnrollNodeResponse {
-  http_ca_key: string
-  http_ca_cert: string
-  transport_ca_cert: string
-  transport_key: string
-  transport_cert: string
-  nodes_addresses: string[]
-}
-
-export interface SecurityGetApiKeyRequest extends RequestBase {
-  id?: Id
-  name?: Name
-  owner?: boolean
-  realm_name?: Name
-  username?: Username
-  with_limited_by?: boolean
-  active_only?: boolean
-  with_profile_uid?: boolean
-}
-
-export interface SecurityGetApiKeyResponse {
-  api_keys: SecurityApiKey[]
-}
-
-export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase {
-}
-
-export interface SecurityGetBuiltinPrivilegesResponse {
-  cluster: SecurityClusterPrivilege[]
-  index: IndexName[]
-  remote_cluster: SecurityRemoteClusterPrivilege[]
-}
-
-export interface SecurityGetPrivilegesRequest extends RequestBase {
-  application?: Name
-  name?: Names
-}
-
-export type SecurityGetPrivilegesResponse = Record<string, Record<string, SecurityPutPrivilegesActions>>
-
-export interface SecurityGetRoleRequest extends RequestBase {
-  name?: Names
-}
-
-export type SecurityGetRoleResponse = Record
-
-export interface SecurityGetRoleRole {
-  cluster: SecurityClusterPrivilege[]
-  indices: SecurityIndicesPrivileges[]
-  remote_indices?: SecurityRemoteIndicesPrivileges[]
-  remote_cluster?: SecurityRemoteClusterPrivileges[]
-  metadata: Metadata
-  description?: string
-  run_as?: string[]
-  transient_metadata?: Record
-  applications: SecurityApplicationPrivileges[]
-  role_templates?: SecurityRoleTemplate[]
-  global?: Record<string, Record<string, Record<string, string[]>>>
-}
-
-export interface SecurityGetRoleMappingRequest extends RequestBase {
-  name?: Names
-}
-
-export type SecurityGetRoleMappingResponse = Record
-
-export interface SecurityGetServiceAccountsRequest extends RequestBase {
-  namespace?: Namespace
-  service?: Service
-}
-
-export type SecurityGetServiceAccountsResponse = Record
-
-export interface SecurityGetServiceAccountsRoleDescriptorWrapper {
-  role_descriptor: SecurityRoleDescriptorRead
-}
-
-export interface SecurityGetServiceCredentialsNodesCredentials {
-  _nodes: NodeStatistics
-  file_tokens: Record
-}
-
-export interface SecurityGetServiceCredentialsNodesCredentialsFileToken {
-  nodes: string[]
-}
-
-export interface SecurityGetServiceCredentialsRequest extends RequestBase {
-  namespace: Namespace
-  service: Name
-}
-
-export interface SecurityGetServiceCredentialsResponse {
-  service_account: string
-  count: integer
-  tokens: Record
-  nodes_credentials: SecurityGetServiceCredentialsNodesCredentials
-}
-
-export interface SecurityGetSettingsRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export interface SecurityGetSettingsResponse {
-  security: SecuritySecuritySettings
-  'security-profile': SecuritySecuritySettings
-  'security-tokens': SecuritySecuritySettings
-}
-
-export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token'
-
-export interface SecurityGetTokenAuthenticatedUser extends SecurityUser {
-  authentication_realm: SecurityGetTokenUserRealm
-  lookup_realm: SecurityGetTokenUserRealm
-  authentication_provider?: SecurityGetTokenAuthenticationProvider
-  authentication_type: string
-}
-
-export interface SecurityGetTokenAuthenticationProvider {
-  type: string
-  name: Name
-}
-
-export interface SecurityGetTokenRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    grant_type?: SecurityGetTokenAccessTokenGrantType
-    scope?: string
-    password?: Password
-    kerberos_ticket?: string
-    refresh_token?: string
-    username?: Username
-  }
-}
-
-export interface SecurityGetTokenResponse {
-  access_token: string
-  expires_in: long
-  scope?: string
-  type: string
-  refresh_token?: string
-  kerberos_authentication_response_token?: string
-  authentication: SecurityGetTokenAuthenticatedUser
-}
-
-export interface SecurityGetTokenUserRealm {
-  name: Name
-  type: string
-}
-
-export interface SecurityGetUserRequest extends RequestBase {
-  username?: Username | Username[]
-  with_profile_uid?: boolean
-}
-
-export type SecurityGetUserResponse = Record
-
-export interface SecurityGetUserPrivilegesRequest extends RequestBase {
-}
-
-export interface SecurityGetUserPrivilegesResponse {
-  applications: SecurityApplicationPrivileges[]
-  cluster: string[]
-  remote_cluster?: SecurityRemoteClusterPrivileges[]
-  global: SecurityGlobalPrivilege[]
-  indices: SecurityUserIndicesPrivileges[]
-  remote_indices?: SecurityRemoteUserIndicesPrivileges[]
-  run_as: string[]
-}
-
-export interface SecurityGetUserProfileGetUserProfileErrors {
-  count: long
-  details: Record
-}
-
-export interface SecurityGetUserProfileRequest extends RequestBase {
-  uid: SecurityUserProfileId | SecurityUserProfileId[]
-  data?: string | string[]
-}
-
-export interface SecurityGetUserProfileResponse {
-  profiles: SecurityUserProfileWithMetadata[]
-  errors?: SecurityGetUserProfileGetUserProfileErrors
-}
-
-export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password'
-
-export interface SecurityGrantApiKeyGrantApiKey {
-  name: Name
-  expiration?: DurationLarge
-  role_descriptors?: Record | Record[]
-  metadata?: Metadata
-}
-
-export interface SecurityGrantApiKeyRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    api_key: SecurityGrantApiKeyGrantApiKey
-    grant_type: SecurityGrantApiKeyApiKeyGrantType
-    access_token?: string
-    username?: Username
-    password?: Password
-    run_as?: Username
-  }
-}
-
-export interface SecurityGrantApiKeyResponse {
-  api_key: string
-  id: Id
-  name: Name
-  expiration?: EpochTime
-  encoded: string
-}
-
-export interface SecurityHasPrivilegesApplicationPrivilegesCheck {
-  application: string
-  privileges: string[]
-  resources: string[]
-}
-
-export type SecurityHasPrivilegesApplicationsPrivileges = Record
-
-export interface SecurityHasPrivilegesIndexPrivilegesCheck {
-  names: Indices
-  privileges: SecurityIndexPrivilege[]
-  allow_restricted_indices?: boolean
-}
-
-export type SecurityHasPrivilegesPrivileges = Record
-
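// [Editor's sketch, not part of the diff] Checking effective permissions with
// the has-privileges types around here, assuming a configured `client`:
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read'] }]
})
if (!check.has_all_requested) {
  // degrade gracefully, e.g. hide UI features the caller cannot use
}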
-export interface SecurityHasPrivilegesRequest extends RequestBase {
-  user?: Name
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
-    cluster?: SecurityClusterPrivilege[]
-    index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
-  }
-}
-
-export type SecurityHasPrivilegesResourcePrivileges = Record
-
-export interface SecurityHasPrivilegesResponse {
-  application: SecurityHasPrivilegesApplicationsPrivileges
-  cluster: Record
-  has_all_requested: boolean
-  index: Record
-  username: Username
-}
-
-export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors {
-  count: long
-  details: Record
-}
-
-export interface SecurityHasPrivilegesUserProfilePrivilegesCheck {
-  application?: SecurityHasPrivilegesApplicationPrivilegesCheck[]
-  cluster?: SecurityClusterPrivilege[]
-  index?: SecurityHasPrivilegesIndexPrivilegesCheck[]
-}
-
-export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    uids: SecurityUserProfileId[]
-    privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck
-  }
-}
-
-export interface SecurityHasPrivilegesUserProfileResponse {
-  has_privilege_uids: SecurityUserProfileId[]
-  errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors
-}
-
-export interface SecurityInvalidateApiKeyRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    id?: Id
-    ids?: Id[]
-    name?: Name
-    owner?: boolean
-    realm_name?: string
-    username?: Username
-  }
-}
-
-export interface SecurityInvalidateApiKeyResponse {
-  error_count: integer
-  error_details?: ErrorCause[]
-  invalidated_api_keys: string[]
-  previously_invalidated_api_keys: string[]
-}
-
-export interface SecurityInvalidateTokenRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    token?: string
-    refresh_token?: string
-    realm_name?: Name
-    username?: Username
-  }
-}
-
-export interface SecurityInvalidateTokenResponse {
-  error_count: long
-  error_details?: ErrorCause[]
-  invalidated_tokens: long
-  previously_invalidated_tokens: long
-}
-
-export interface SecurityOidcAuthenticateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    nonce: string
-    realm?: string
-    redirect_uri: string
-    state: string
-  }
-}
-
-export interface SecurityOidcAuthenticateResponse {
-  access_token: string
-  expires_in: integer
-  refresh_token: string
-  type: string
-}
-
-export interface SecurityOidcLogoutRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    token: string
-    refresh_token?: string
-  }
-}
-
-export interface SecurityOidcLogoutResponse {
-  redirect: string
-}
-
-export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    iss?: string
-    login_hint?: string
-    nonce?: string
-    realm?: string
-    state?: string
-  }
-}
-
-export interface SecurityOidcPrepareAuthenticationResponse {
-  nonce: string
-  realm: string
-  redirect: string
-  state: string
-}
-
-export interface SecurityPutPrivilegesActions {
-  actions: string[]
-  application?: string
-  name?: Name
-  metadata?: Metadata
-}
-
-export interface SecurityPutPrivilegesRequest extends RequestBase {
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, use 'privileges' instead. */
-  body?: Record<string, Record<string, SecurityPutPrivilegesActions>>
-}
-
-export type SecurityPutPrivilegesResponse = Record<string, Record<string, SecurityCreatedStatus>>
-
-export interface SecurityPutRoleRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    applications?: SecurityApplicationPrivileges[]
-    cluster?: SecurityClusterPrivilege[]
-    global?: Record
-    indices?: SecurityIndicesPrivileges[]
-    remote_indices?: SecurityRemoteIndicesPrivileges[]
-    remote_cluster?: SecurityRemoteClusterPrivileges[]
-    metadata?: Metadata
-    run_as?: string[]
-    description?: string
-    transient_metadata?: Record
-  }
-}
-
-export interface SecurityPutRoleResponse {
-  role: SecurityCreatedStatus
-}
-
-export interface SecurityPutRoleMappingRequest extends RequestBase {
-  name: Name
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    enabled?: boolean
-    metadata?: Metadata
-    roles?: string[]
-    role_templates?: SecurityRoleTemplate[]
-    rules?: SecurityRoleMappingRule
-    run_as?: string[]
-  }
-}
-
-export interface SecurityPutRoleMappingResponse {
-  created?: boolean
-  role_mapping: SecurityCreatedStatus
-}
-
-export interface SecurityPutUserRequest extends RequestBase {
-  username: Username
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    username?: Username
-    email?: string | null
-    full_name?: string | null
-    metadata?: Metadata
-    password?: Password
-    password_hash?: string
-    roles?: string[]
-    enabled?: boolean
-  }
-}
-
-export interface SecurityPutUserResponse {
-  created: boolean
-}
-
-export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate
-
-export interface SecurityQueryApiKeysApiKeyAggregationContainer {
-  aggregations?: Record
-  aggs?: Record
-  meta?: Metadata
-  cardinality?: AggregationsCardinalityAggregation
-  composite?: AggregationsCompositeAggregation
-  date_range?: AggregationsDateRangeAggregation
-  filter?: SecurityQueryApiKeysApiKeyQueryContainer
-  filters?: SecurityQueryApiKeysApiKeyFiltersAggregation
-  missing?: AggregationsMissingAggregation
-  range?: AggregationsRangeAggregation
-  terms?: AggregationsTermsAggregation
-  value_count?: AggregationsValueCountAggregation
-}
-
-export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase {
-  filters?: AggregationsBuckets
-  other_bucket?: boolean
-  other_bucket_key?: string
-  keyed?: boolean
-}
-
-export interface SecurityQueryApiKeysApiKeyQueryContainer {
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  ids?: QueryDslIdsQuery
-  match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>>
-  range?: Partial<Record<Field, QueryDslRangeQuery>>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>>
-}
-
-export interface SecurityQueryApiKeysRequest extends RequestBase {
-  with_limited_by?: boolean
-  with_profile_uid?: boolean
-  typed_keys?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    aggregations?: Record
-    /** @alias aggregations */
-    aggs?: Record
-    query?: SecurityQueryApiKeysApiKeyQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryApiKeysResponse {
-  total: integer
-  count: integer
-  api_keys: SecurityApiKey[]
-  aggregations?: Record
-}
-
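// [Editor's sketch, not part of the diff] Paging through API keys with the
// query types above, assuming a configured `client`:
const page = await client.security.queryApiKeys({
  query: { term: { invalidated: false } },  // SecurityQueryApiKeysApiKeyQueryContainer
  size: 25,
  sort: [{ creation: 'desc' }]              // creation time is a sortable field
})
console.log(page.total, page.api_keys.length)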
-export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor {
-  _sort?: SortResults
-  name: string
-}
-
-export interface SecurityQueryRoleRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: SecurityQueryRoleRoleQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryRoleResponse {
-  total: integer
-  count: integer
-  roles: SecurityQueryRoleQueryRole[]
-}
-
-export interface SecurityQueryRoleRoleQueryContainer {
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  ids?: QueryDslIdsQuery
-  match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>>
-  range?: Partial<Record<Field, QueryDslRangeQuery>>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>>
-}
-
-export interface SecurityQueryUserQueryUser extends SecurityUser {
-  _sort?: SortResults
-}
-
-export interface SecurityQueryUserRequest extends RequestBase {
-  with_profile_uid?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    query?: SecurityQueryUserUserQueryContainer
-    from?: integer
-    sort?: Sort
-    size?: integer
-    search_after?: SortResults
-  }
-}
-
-export interface SecurityQueryUserResponse {
-  total: integer
-  count: integer
-  users: SecurityQueryUserQueryUser[]
-}
-
-export interface SecurityQueryUserUserQueryContainer {
-  ids?: QueryDslIdsQuery
-  bool?: QueryDslBoolQuery
-  exists?: QueryDslExistsQuery
-  match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>>
-  match_all?: QueryDslMatchAllQuery
-  prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>>
-  range?: Partial<Record<Field, QueryDslRangeQuery>>
-  simple_query_string?: QueryDslSimpleQueryStringQuery
-  term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>>
-  terms?: QueryDslTermsQuery
-  wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>>
-}
-
-export interface SecuritySamlAuthenticateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    content: string
-    ids: Ids
-    realm?: string
-  }
-}
-
-export interface SecuritySamlAuthenticateResponse {
-  access_token: string
-  username: string
-  expires_in: integer
-  refresh_token: string
-  realm: string
-}
-
-export interface SecuritySamlCompleteLogoutRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    realm: string
-    ids: Ids
-    query_string?: string
-    content?: string
-  }
-}
-
-export type SecuritySamlCompleteLogoutResponse = boolean
-
-export interface SecuritySamlInvalidateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    acs?: string
-    query_string: string
-    realm?: string
-  }
-}
-
-export interface SecuritySamlInvalidateResponse {
-  invalidated: integer
-  realm: string
-  redirect: string
-}
-
-export interface SecuritySamlLogoutRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    token: string
-    refresh_token?: string
-  }
-}
-
-export interface SecuritySamlLogoutResponse {
-  redirect: string
-}
-
-export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    acs?: string
-    realm?: string
-    relay_state?: string
-  }
-}
-
-export interface SecuritySamlPrepareAuthenticationResponse {
-  id: Id
-  realm: string
-  redirect: string
-}
-
-export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase {
-  realm_name: Name
-}
-
-export interface SecuritySamlServiceProviderMetadataResponse {
-  metadata: string
-}
-
-export interface SecuritySuggestUserProfilesHint {
-  uids?: SecurityUserProfileId[]
-  labels?: Record
-}
-
-export interface SecuritySuggestUserProfilesRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    name?: string
-    size?: long
-    data?: string | string[]
-    hint?: SecuritySuggestUserProfilesHint
-  }
-}
-
-export interface SecuritySuggestUserProfilesResponse {
-  total: SecuritySuggestUserProfilesTotalUserProfiles
-  took: long
-  profiles: SecurityUserProfile[]
-}
-
-export interface SecuritySuggestUserProfilesTotalUserProfiles {
-  value: long
-  relation: RelationName
-}
-
-export interface SecurityUpdateApiKeyRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    role_descriptors?: Record
-    metadata?: Metadata
-    expiration?: Duration
-  }
-}
-
-export interface SecurityUpdateApiKeyResponse {
-  updated: boolean
-}
-
-export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    access: SecurityAccess
-    expiration?: Duration
-    metadata?: Metadata
-  }
-}
-
-export interface SecurityUpdateCrossClusterApiKeyResponse {
-  updated: boolean
-}
-
-export interface SecurityUpdateSettingsRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    security?: SecuritySecuritySettings
-    'security-profile'?: SecuritySecuritySettings
-    'security-tokens'?: SecuritySecuritySettings
-  }
-}
-
-export interface SecurityUpdateSettingsResponse {
-  acknowledged: boolean
-}
-
-export interface SecurityUpdateUserProfileDataRequest extends RequestBase {
-  uid: SecurityUserProfileId
-  if_seq_no?: SequenceNumber
-  if_primary_term?: long
-  refresh?: Refresh
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    labels?: Record
-    data?: Record
-  }
-}
-
-export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase
-
-export type ShutdownType = 'restart' | 'remove' | 'replace'
-
-export interface ShutdownDeleteNodeRequest extends RequestBase {
-  node_id: NodeId
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
-}
-
-export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase
-
-export interface ShutdownGetNodeNodeShutdownStatus {
-  node_id: NodeId
-  type: ShutdownGetNodeShutdownType
-  reason: string
-  shutdown_startedmillis: EpochTime
-  status: ShutdownGetNodeShutdownStatus
-  shard_migration: ShutdownGetNodeShardMigrationStatus
-  persistent_tasks: ShutdownGetNodePersistentTaskStatus
-  plugins: ShutdownGetNodePluginsStatus
-}
-
-export interface ShutdownGetNodePersistentTaskStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export interface ShutdownGetNodePluginsStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export interface ShutdownGetNodeRequest extends RequestBase {
-  node_id?: NodeIds
-  master_timeout?: TimeUnit
-}
-
-export interface ShutdownGetNodeResponse {
-  nodes: ShutdownGetNodeNodeShutdownStatus[]
-}
-
-export interface ShutdownGetNodeShardMigrationStatus {
-  status: ShutdownGetNodeShutdownStatus
-}
-
-export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete'
-
-export type ShutdownGetNodeShutdownType = 'remove' | 'restart'
-
-export interface ShutdownPutNodeRequest extends RequestBase {
-  node_id: NodeId
-  master_timeout?: TimeUnit
-  timeout?: TimeUnit
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    type: ShutdownType
-    reason: string
-    allocation_delay?: string
-    target_node_name?: string
-  }
-}
-
-export type ShutdownPutNodeResponse = AcknowledgedResponseBase
-
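// [Editor's sketch, not part of the diff] Registering a node shutdown with the
// types above, assuming a configured `client`; the node id is hypothetical:
await client.shutdown.putNode({
  node_id: 'node-1',           // hypothetical node id
  type: 'restart',             // ShutdownType: 'restart' | 'remove' | 'replace'
  reason: 'OS patching',
  allocation_delay: '10m'      // only meaningful for restarts
})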
-export interface SimulateIngestIngestDocumentSimulationKeys {
-  _id: Id
-  _index: IndexName
-  _source: Record
-  _version: SpecUtilsStringified
-  executed_pipelines: string[]
-  ignored_fields?: Record[]
-  error?: ErrorCause
-}
-export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys
-& { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause }
-
-export interface SimulateIngestRequest extends RequestBase {
-  index?: IndexName
-  pipeline?: PipelineName
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    docs: IngestDocument[]
-    component_template_substitutions?: Record
-    index_template_substitutions?: Record
-    mapping_addition?: MappingTypeMapping
-    pipeline_substitutions?: Record
-  }
-}
-
-export interface SimulateIngestResponse {
-  docs: SimulateIngestSimulateIngestDocumentResult[]
-}
-
-export interface SimulateIngestSimulateIngestDocumentResult {
-  doc?: SimulateIngestIngestDocumentSimulation
-}
-
-export interface SlmConfiguration {
-  ignore_unavailable?: boolean
-  indices?: Indices
-  include_global_state?: boolean
-  feature_states?: string[]
-  metadata?: Metadata
-  partial?: boolean
-}
-
-export interface SlmInProgress {
-  name: Name
-  start_time_millis: EpochTime
-  state: string
-  uuid: Uuid
-}
-
-export interface SlmInvocation {
-  snapshot_name: Name
-  time: DateTime
-}
-
-export interface SlmPolicy {
-  config?: SlmConfiguration
-  name: Name
-  repository: string
-  retention?: SlmRetention
-  schedule: WatcherCronExpression
-}
-
-export interface SlmRetention {
-  expire_after: Duration
-  max_count: integer
-  min_count: integer
-}
-
-export interface SlmSnapshotLifecycle {
-  in_progress?: SlmInProgress
-  last_failure?: SlmInvocation
-  last_success?: SlmInvocation
-  modified_date?: DateTime
-  modified_date_millis: EpochTime
-  next_execution?: DateTime
-  next_execution_millis: EpochTime
-  policy: SlmPolicy
-  version: VersionNumber
-  stats: SlmStatistics
-}
-
-export interface SlmSnapshotPolicyStats {
-  policy: string
-  snapshots_taken: long
-  snapshots_failed: long
-  snapshots_deleted: long
-  snapshot_deletion_failures: long
-}
-
-export interface SlmStatistics {
-  retention_deletion_time?: Duration
-  retention_deletion_time_millis?: DurationValue
-  retention_failed?: long
-  retention_runs?: long
-  retention_timed_out?: long
-  policy?: Id
-  total_snapshots_deleted?: long
-  snapshots_deleted?: long
-  total_snapshot_deletion_failures?: long
-  snapshot_deletion_failures?: long
-  total_snapshots_failed?: long
-  snapshots_failed?: long
-  total_snapshots_taken?: long
-  snapshots_taken?: long
-}
-
-export interface SlmDeleteLifecycleRequest extends RequestBase {
-  policy_id: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase
-
-export interface SlmExecuteLifecycleRequest extends RequestBase {
-  policy_id: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SlmExecuteLifecycleResponse {
-  snapshot_name: Name
-}
-
-export interface SlmExecuteRetentionRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SlmExecuteRetentionResponse = AcknowledgedResponseBase
-
-export interface SlmGetLifecycleRequest extends RequestBase {
-  policy_id?: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SlmGetLifecycleResponse = Record
-
-export interface SlmGetStatsRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SlmGetStatsResponse {
-  retention_deletion_time: Duration
-  retention_deletion_time_millis: DurationValue
-  retention_failed: long
-  retention_runs: long
-  retention_timed_out: long
-  total_snapshots_deleted: long
-  total_snapshot_deletion_failures: long
-  total_snapshots_failed: long
-  total_snapshots_taken: long
-  policy_stats: SlmSnapshotPolicyStats[]
-}
-
-export interface SlmGetStatusRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SlmGetStatusResponse {
-  operation_mode: LifecycleOperationMode
-}
-
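// [Editor's sketch, not part of the diff] Creating an SLM policy with the
// slm.putLifecycle API (request type just below), assuming a configured
// `client`; policy, snapshot-name template and repository are hypothetical:
await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',          // hypothetical policy id
  schedule: '0 30 1 * * ?',                // WatcherCronExpression: daily at 01:30
  name: '<nightly-{now/d}>',               // date-math snapshot name template
  repository: 'backups',                   // hypothetical repository
  config: { indices: ['logs-*'], include_global_state: false },
  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
})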
-export interface SlmPutLifecycleRequest extends RequestBase {
-  policy_id: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    config?: SlmConfiguration
-    name?: Name
-    repository?: string
-    retention?: SlmRetention
-    schedule?: WatcherCronExpression
-  }
-}
-
-export type SlmPutLifecycleResponse = AcknowledgedResponseBase
-
-export interface SlmStartRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SlmStartResponse = AcknowledgedResponseBase
-
-export interface SlmStopRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SlmStopResponse = AcknowledgedResponseBase
-
-export interface SnapshotAzureRepository extends SnapshotRepositoryBase {
-  type: 'azure'
-  settings: SnapshotAzureRepositorySettings
-}
-
-export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase {
-  client?: string
-  container?: string
-  base_path?: string
-  readonly?: boolean
-  location_mode?: string
-}
-
-export interface SnapshotFileCountSnapshotStats {
-  file_count: integer
-  size_in_bytes: long
-}
-
-export interface SnapshotGcsRepository extends SnapshotRepositoryBase {
-  type: 'gcs'
-  settings: SnapshotGcsRepositorySettings
-}
-
-export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase {
-  bucket: string
-  client?: string
-  base_path?: string
-  readonly?: boolean
-  application_name?: string
-}
-
-export interface SnapshotIndexDetails {
-  shard_count: integer
-  size?: ByteSize
-  size_in_bytes: long
-  max_segments_per_shard: long
-}
-
-export interface SnapshotInfoFeatureState {
-  feature_name: string
-  indices: Indices
-}
-
-export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase {
-  type: 'url'
-  settings: SnapshotReadOnlyUrlRepositorySettings
-}
-
-export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase {
-  http_max_retries?: integer
-  http_socket_timeout?: Duration
-  max_number_of_snapshots?: integer
-  url: string
-}
-
-export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository
-
-export interface SnapshotRepositoryBase {
-  uuid?: Uuid
-}
-
-export interface SnapshotRepositorySettingsBase {
-  chunk_size?: ByteSize
-  compress?: boolean
-  max_restore_bytes_per_sec?: ByteSize
-  max_snapshot_bytes_per_sec?: ByteSize
-}
-
-export interface SnapshotS3Repository extends SnapshotRepositoryBase {
-  type: 's3'
-  settings: SnapshotS3RepositorySettings
-}
-
-export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase {
-  bucket: string
-  client?: string
-  base_path?: string
-  readonly?: boolean
-  server_side_encryption?: boolean
-  buffer_size?: ByteSize
-  canned_acl?: string
-  storage_class?: string
-}
-
-export interface SnapshotShardsStats {
-  done: long
-  failed: long
-  finalizing: long
-  initializing: long
-  started: long
-  total: long
-}
-
-export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED'
-
-export interface SnapshotShardsStatsSummary {
-  incremental: SnapshotShardsStatsSummaryItem
-  total: SnapshotShardsStatsSummaryItem
-  start_time_in_millis: EpochTime
-  time?: Duration
-  time_in_millis: DurationValue
-}
-
-export interface SnapshotShardsStatsSummaryItem {
-  file_count: long
-  size_in_bytes: long
-}
-
-export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase {
-  type: 'fs'
-  settings: SnapshotSharedFileSystemRepositorySettings
-}
-
-export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase {
-  location: string
-  max_number_of_snapshots?: integer
-  readonly?: boolean
-}
-
-export interface SnapshotSnapshotIndexStats {
-  shards: Record
-  shards_stats: SnapshotShardsStats
-  stats: SnapshotSnapshotStats
-}
-
-export interface SnapshotSnapshotInfo {
-  data_streams: string[]
-  duration?: Duration
-  duration_in_millis?: DurationValue
-  end_time?: DateTime
-  end_time_in_millis?: EpochTime
-  failures?: SnapshotSnapshotShardFailure[]
-  include_global_state?: boolean
-  indices?: IndexName[]
-  index_details?: Record
-  metadata?: Metadata
-  reason?: string
-  repository?: Name
-  snapshot: Name
-  shards?: ShardStatistics
-  start_time?: DateTime
-  start_time_in_millis?: EpochTime
-  state?: string
-  uuid: Uuid
-  version?: VersionString
-  version_id?: VersionNumber
-  feature_states?: SnapshotInfoFeatureState[]
-}
-
-export interface SnapshotSnapshotShardFailure {
-  index: IndexName
-  node_id?: Id
-  reason: string
-  shard_id: integer
-  index_uuid: Id
-  status: string
-}
-
-export interface SnapshotSnapshotShardsStatus {
-  stage: SnapshotShardsStatsStage
-  stats: SnapshotShardsStatsSummary
-}
-
-export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count'
-
-export interface SnapshotSnapshotStats {
-  incremental: SnapshotFileCountSnapshotStats
-  start_time_in_millis: EpochTime
-  time?: Duration
-  time_in_millis: DurationValue
-  total: SnapshotFileCountSnapshotStats
-}
-
-export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase {
-  type: 'source'
-  settings: SnapshotSourceOnlyRepositorySettings
-}
-
-export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase {
-  delegate_type?: string
-  max_number_of_snapshots?: integer
-  read_only?: boolean
-  readonly?: boolean
-}
-
-export interface SnapshotStatus {
-  include_global_state: boolean
-  indices: Record
-  repository: string
-  shards_stats: SnapshotShardsStats
-  snapshot: string
-  state: string
-  stats: SnapshotSnapshotStats
-  uuid: Uuid
-}
-
-export interface SnapshotCleanupRepositoryCleanupRepositoryResults {
-  deleted_blobs: long
-  deleted_bytes: long
-}
-
-export interface SnapshotCleanupRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SnapshotCleanupRepositoryResponse {
-  results: SnapshotCleanupRepositoryCleanupRepositoryResults
-}
-
-export interface SnapshotCloneRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  target_snapshot: Name
-  master_timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    indices: string
-  }
-}
-
-export type SnapshotCloneResponse = AcknowledgedResponseBase
-
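// [Editor's sketch, not part of the diff] Taking a snapshot with the create
// types below, assuming a configured `client` and an existing repository;
// repository and snapshot names are hypothetical:
const snap = await client.snapshot.create({
  repository: 'backups',            // hypothetical repository
  snapshot: 'pre-upgrade-1',        // hypothetical snapshot name
  wait_for_completion: true,
  indices: 'logs-*',
  include_global_state: false
})
console.log(snap.snapshot?.state)   // e.g. 'SUCCESS' when completed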
-export interface SnapshotCreateRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    ignore_unavailable?: boolean
-    include_global_state?: boolean
-    indices?: Indices
-    feature_states?: string[]
-    metadata?: Metadata
-    partial?: boolean
-  }
-}
-
-export interface SnapshotCreateResponse {
-  accepted?: boolean
-  snapshot?: SnapshotSnapshotInfo
-}
-
-export interface SnapshotCreateRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-  verify?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, use 'repository' instead. */
-  body?: SnapshotRepository
-}
-
-export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase
-
-export interface SnapshotDeleteRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export type SnapshotDeleteResponse = AcknowledgedResponseBase
-
-export interface SnapshotDeleteRepositoryRequest extends RequestBase {
-  name: Names
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase
-
-export interface SnapshotGetRequest extends RequestBase {
-  repository: Name
-  snapshot: Names
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-  verbose?: boolean
-  index_details?: boolean
-  index_names?: boolean
-  include_repository?: boolean
-  sort?: SnapshotSnapshotSort
-  size?: integer
-  order?: SortOrder
-  after?: string
-  offset?: integer
-  from_sort_value?: string
-  slm_policy_filter?: Name
-}
-
-export interface SnapshotGetResponse {
-  responses?: SnapshotGetSnapshotResponseItem[]
-  snapshots?: SnapshotSnapshotInfo[]
-  total: integer
-  remaining: integer
-}
-
-export interface SnapshotGetSnapshotResponseItem {
-  repository: Name
-  snapshots?: SnapshotSnapshotInfo[]
-  error?: ErrorCause
-}
-
-export interface SnapshotGetRepositoryRequest extends RequestBase {
-  name?: Names
-  local?: boolean
-  master_timeout?: Duration
-}
-
-export type SnapshotGetRepositoryResponse = Record
-
-export interface SnapshotRepositoryAnalyzeBlobDetails {
-  name: string
-  overwritten: boolean
-  read_early: boolean
-  read_end: long
-  read_start: long
-  reads: SnapshotRepositoryAnalyzeReadBlobDetails
-  size: ByteSize
-  size_bytes: long
-}
-
-export interface SnapshotRepositoryAnalyzeDetailsInfo {
-  blob: SnapshotRepositoryAnalyzeBlobDetails
-  overwrite_elapsed?: Duration
-  overwrite_elapsed_nanos?: DurationValue
-  write_elapsed: Duration
-  write_elapsed_nanos: DurationValue
-  write_throttled: Duration
-  write_throttled_nanos: DurationValue
-  writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo
-}
-
-export interface SnapshotRepositoryAnalyzeReadBlobDetails {
-  before_write_complete?: boolean
-  elapsed?: Duration
-  elapsed_nanos?: DurationValue
-  first_byte_time?: Duration
-  first_byte_time_nanos: DurationValue
-  found: boolean
-  node: SnapshotRepositoryAnalyzeSnapshotNodeInfo
-  throttled?: Duration
-  throttled_nanos?: DurationValue
-}
-
-export interface SnapshotRepositoryAnalyzeReadSummaryInfo {
-  count: integer
-  max_wait: Duration
-  max_wait_nanos: DurationValue
-  total_elapsed: Duration
-  total_elapsed_nanos: DurationValue
-  total_size: ByteSize
-  total_size_bytes: long
-  total_throttled: Duration
-  total_throttled_nanos: DurationValue
-  total_wait: Duration
-  total_wait_nanos: DurationValue
-}
-
-export interface SnapshotRepositoryAnalyzeRequest extends RequestBase {
-  name: Name
-  blob_count?: integer
-  concurrency?: integer
-  detailed?: boolean
-  early_read_node_count?: integer
-  max_blob_size?: ByteSize
-  max_total_data_size?: ByteSize
-  rare_action_probability?: double
-  rarely_abort_writes?: boolean
-  read_node_count?: integer
-  register_operation_count?: integer
-  seed?: integer
-  timeout?: Duration
-}
-
-export interface SnapshotRepositoryAnalyzeResponse {
-  blob_count: integer
-  blob_path: string
-  concurrency: integer
-  coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo
-  delete_elapsed: Duration
-  delete_elapsed_nanos: DurationValue
-  details: SnapshotRepositoryAnalyzeDetailsInfo
-  early_read_node_count: integer
-  issues_detected: string[]
-  listing_elapsed: Duration
-  listing_elapsed_nanos: DurationValue
-  max_blob_size: ByteSize
-  max_blob_size_bytes: long
-  max_total_data_size: ByteSize
-  max_total_data_size_bytes: long
-  rare_action_probability: double
-  read_node_count: integer
-  repository: string
-  seed: long
-  summary: SnapshotRepositoryAnalyzeSummaryInfo
-}
-
-export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo {
-  id: Id
-  name: Name
-}
-
-export interface SnapshotRepositoryAnalyzeSummaryInfo {
-  read: SnapshotRepositoryAnalyzeReadSummaryInfo
-  write: SnapshotRepositoryAnalyzeWriteSummaryInfo
-}
-
-export interface SnapshotRepositoryAnalyzeWriteSummaryInfo {
-  count: integer
-  total_elapsed: Duration
-  total_elapsed_nanos: DurationValue
-  total_size: ByteSize
-  total_size_bytes: long
-  total_throttled: Duration
-  total_throttled_nanos: long
-}
-
-export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase {
-  name: Names
-  meta_thread_pool_concurrency?: integer
-  blob_thread_pool_concurrency?: integer
-  snapshot_verification_concurrency?: integer
-  index_verification_concurrency?: integer
-  index_snapshot_verification_concurrency?: integer
-  max_failed_shard_snapshots?: integer
-  verify_blob_contents?: boolean
-  max_bytes_per_sec?: string
-}
-
-export type SnapshotRepositoryVerifyIntegrityResponse = any
-
-export interface SnapshotRestoreRequest extends RequestBase {
-  repository: Name
-  snapshot: Name
-  master_timeout?: Duration
-  wait_for_completion?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    feature_states?: string[]
-    ignore_index_settings?: string[]
-    ignore_unavailable?: boolean
-    include_aliases?: boolean
-    include_global_state?: boolean
-    index_settings?: IndicesIndexSettings
-    indices?: Indices
-    partial?: boolean
-    rename_pattern?: string
-    rename_replacement?: string
-  }
-}
-
-export interface SnapshotRestoreResponse {
-  accepted?: boolean
-  snapshot?: SnapshotRestoreSnapshotRestore
-}
-
-export interface SnapshotRestoreSnapshotRestore {
-  indices: IndexName[]
-  snapshot: string
-  shards: ShardStatistics
-}
-
-export interface SnapshotStatusRequest extends RequestBase {
-  repository?: Name
-  snapshot?: Names
-  ignore_unavailable?: boolean
-  master_timeout?: Duration
-}
-
-export interface SnapshotStatusResponse {
-  snapshots: SnapshotStatus[]
-}
-
-export interface SnapshotVerifyRepositoryCompactNodeInfo {
-  name: Name
-}
-
-export interface SnapshotVerifyRepositoryRequest extends RequestBase {
-  name: Name
-  master_timeout?: Duration
-  timeout?: Duration
-}
-
-export interface SnapshotVerifyRepositoryResponse {
-  nodes: Record
-}
-
-export interface SqlColumn {
-  name: Name
-  type: string
-}
-
-export type SqlRow = any[]
-
-export interface SqlClearCursorRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    cursor: string
-  }
-}
-
-export interface SqlClearCursorResponse {
-  succeeded: boolean
-}
-
-export interface SqlDeleteAsyncRequest extends RequestBase {
-  id: Id
-}
-
-export type SqlDeleteAsyncResponse = AcknowledgedResponseBase
-
-export interface SqlGetAsyncRequest extends RequestBase {
-  id: Id
-  delimiter?: string
-  format?: string
-  keep_alive?: Duration
-  wait_for_completion_timeout?: Duration
-}
-
-export interface SqlGetAsyncResponse {
-  id: Id
-  is_running: boolean
-  is_partial: boolean
-  columns?: SqlColumn[]
-  cursor?: string
-  rows: SqlRow[]
-}
-
-export interface SqlGetAsyncStatusRequest extends RequestBase {
-  id: Id
-}
-
-export interface SqlGetAsyncStatusResponse {
-  expiration_time_in_millis: EpochTime
-  id: string
-  is_running: boolean
-  is_partial: boolean
-  start_time_in_millis: EpochTime
-  completion_status?: uint
-}
-
-export interface SqlQueryRequest extends RequestBase {
-  format?: SqlQuerySqlFormat
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    allow_partial_search_results?: boolean
-    catalog?: string
-    columnar?: boolean
-    cursor?: string
-    fetch_size?: integer
-    field_multi_value_leniency?: boolean
-    filter?: QueryDslQueryContainer
-    index_using_frozen?: boolean
-    keep_alive?: Duration
-    keep_on_completion?: boolean
-    page_timeout?: Duration
-    params?: any[]
-    query?: string
-    request_timeout?: Duration
-    runtime_mappings?: MappingRuntimeFields
-    time_zone?: TimeZone
-    wait_for_completion_timeout?: Duration
-  }
-}
-
-export interface SqlQueryResponse {
-  columns?: SqlColumn[]
-  cursor?: string
-  id?: Id
-  is_running?: boolean
-  is_partial?: boolean
-  rows: SqlRow[]
-}
-
-export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile'
-
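// [Editor's sketch, not part of the diff] Running an Elasticsearch SQL query
// with the types above, assuming a configured `client`; index name is
// hypothetical:
const result = await client.sql.query({
  query: 'SELECT agent, COUNT(*) AS hits FROM "logs-*" GROUP BY agent LIMIT 5',
  fetch_size: 5
})
// result.columns is SqlColumn[]; result.rows is SqlRow[] (positional arrays)
console.log(result.columns, result.rows)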
-export interface SqlTranslateRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    fetch_size?: integer
-    filter?: QueryDslQueryContainer
-    query: string
-    time_zone?: TimeZone
-  }
-}
-
-export interface SqlTranslateResponse {
-  aggregations?: Record
-  size?: long
-  _source?: SearchSourceConfig
-  fields?: (QueryDslFieldAndFormat | Field)[]
-  query?: QueryDslQueryContainer
-  sort?: Sort
-}
-
-export interface SslCertificatesCertificateInformation {
-  alias: string | null
-  expiry: DateTime
-  format: string
-  has_private_key: boolean
-  issuer?: string
-  path: string
-  serial_number: string
-  subject_dn: string
-}
-
-export interface SslCertificatesRequest extends RequestBase {
-}
-
-export type SslCertificatesResponse = SslCertificatesCertificateInformation[]
-
-export interface SynonymsSynonymRule {
-  id?: Id
-  synonyms: SynonymsSynonymString
-}
-
-export interface SynonymsSynonymRuleRead {
-  id: Id
-  synonyms: SynonymsSynonymString
-}
-
-export type SynonymsSynonymString = string
-
-export interface SynonymsSynonymsUpdateResult {
-  result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
-}
-
-export interface SynonymsDeleteSynonymRequest extends RequestBase {
-  id: Id
-}
-
-export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase
-
-export interface SynonymsDeleteSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-}
-
-export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult
-
-export interface SynonymsGetSynonymRequest extends RequestBase {
-  id: Id
-  from?: integer
-  size?: integer
-}
-
-export interface SynonymsGetSynonymResponse {
-  count: integer
-  synonyms_set: SynonymsSynonymRuleRead[]
-}
-
-export interface SynonymsGetSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-}
-
-export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead
-
-export interface SynonymsGetSynonymsSetsRequest extends RequestBase {
-  from?: integer
-  size?: integer
-}
-
-export interface SynonymsGetSynonymsSetsResponse {
-  count: integer
-  results: SynonymsGetSynonymsSetsSynonymsSetItem[]
-}
-
-export interface SynonymsGetSynonymsSetsSynonymsSetItem {
-  synonyms_set: Id
-  count: integer
-}
-
-export interface SynonymsPutSynonymRequest extends RequestBase {
-  id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
-  }
-}
-
-export interface SynonymsPutSynonymResponse {
-  result: Result
-  reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult
-}
-
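// [Editor's sketch, not part of the diff] Creating a synonym set with the
// types above, assuming a configured `client`; the set and rule ids are
// hypothetical:
await client.synonyms.putSynonym({
  id: 'product-synonyms',                                // hypothetical set id
  synonyms_set: [
    { id: 'laptop-rule', synonyms: 'laptop, notebook' }, // SynonymsSynonymRule
    { synonyms: 'tv => television' }                     // id is optional; one is generated
  ]
})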
-export interface SynonymsPutSynonymRuleRequest extends RequestBase {
-  set_id: Id
-  rule_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    synonyms: SynonymsSynonymString
-  }
-}
-
-export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult
-
-export type TasksGroupBy = 'nodes' | 'parents' | 'none'
-
-export interface TasksNodeTasks {
-  name?: NodeId
-  transport_address?: TransportAddress
-  host?: Host
-  ip?: Ip
-  roles?: string[]
-  attributes?: Record<string, string>
-  tasks: Record<TaskId, TasksTaskInfo>
-}
-
-export interface TasksParentTaskInfo extends TasksTaskInfo {
-  children?: TasksTaskInfo[]
-}
-
-export interface TasksTaskInfo {
-  action: string
-  cancelled?: boolean
-  cancellable: boolean
-  description?: string
-  headers: Record<string, string>
-  id: long
-  node: NodeId
-  running_time?: Duration
-  running_time_in_nanos: DurationValue<UnitNanos>
-  start_time_in_millis: EpochTime<UnitMillis>
-  status?: any
-  type: string
-  parent_task_id?: TaskId
-}
-
-export type TasksTaskInfos = TasksTaskInfo[] | Record<string, TasksParentTaskInfo>
-
-export interface TasksTaskListResponseBase {
-  node_failures?: ErrorCause[]
-  task_failures?: TaskFailure[]
-  nodes?: Record<string, TasksNodeTasks>
-  tasks?: TasksTaskInfos
-}
-
-export interface TasksCancelRequest extends RequestBase {
-  task_id?: TaskId
-  actions?: string | string[]
-  nodes?: string[]
-  parent_task_id?: string
-  wait_for_completion?: boolean
-}
-
-export type TasksCancelResponse = TasksTaskListResponseBase
-
-export interface TasksGetRequest extends RequestBase {
-  task_id: Id
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export interface TasksGetResponse {
-  completed: boolean
-  task: TasksTaskInfo
-  response?: any
-  error?: ErrorCause
-}
-
-export interface TasksListRequest extends RequestBase {
-  actions?: string | string[]
-  detailed?: boolean
-  group_by?: TasksGroupBy
-  nodes?: NodeIds
-  parent_task_id?: Id
-  timeout?: Duration
-  wait_for_completion?: boolean
-}
-
-export type TasksListResponse = TasksTaskListResponseBase
-
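(Usage sketch, not part of the diff: the task types above in practice; the task id is hypothetical.)

    // TasksListRequest: group_by decides whether `nodes` or `tasks` is populated
    const byParent = await client.tasks.list({ detailed: true, group_by: 'parents' })

    // TasksGetResponse: poll a single task until `completed` is true
    const status = await client.tasks.get({ task_id: 'node-1:42' })
    if (!status.completed) {
      console.log(status.task.action, status.task.running_time_in_nanos)
    }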
-export type TextStructureEcsCompatibilityType = 'disabled' | 'v1'
-
-export interface TextStructureFieldStat {
-  count: integer
-  cardinality: integer
-  top_hits: TextStructureTopHit[]
-  mean_value?: integer
-  median_value?: integer
-  max_value?: integer
-  min_value?: integer
-  earliest?: string
-  latest?: string
-}
-
-export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml'
-
-export interface TextStructureTopHit {
-  count: long
-  value: any
-}
-
-export interface TextStructureFindFieldStructureRequest extends RequestBase {
-  column_names?: string
-  delimiter?: string
-  documents_to_sample?: uint
-  ecs_compatibility?: TextStructureEcsCompatibilityType
-  explain?: boolean
-  field: Field
-  format?: TextStructureFormatType
-  grok_pattern?: GrokPattern
-  index: IndexName
-  quote?: string
-  should_trim_fields?: boolean
-  timeout?: Duration
-  timestamp_field?: Field
-  timestamp_format?: string
-}
-
-export interface TextStructureFindFieldStructureResponse {
-  charset: string
-  ecs_compatibility?: TextStructureEcsCompatibilityType
-  field_stats: Record<Field, TextStructureFieldStat>
-  format: TextStructureFormatType
-  grok_pattern?: GrokPattern
-  java_timestamp_formats?: string[]
-  joda_timestamp_formats?: string[]
-  ingest_pipeline: IngestPipelineConfig
-  mappings: MappingTypeMapping
-  multiline_start_pattern?: string
-  need_client_timezone: boolean
-  num_lines_analyzed: integer
-  num_messages_analyzed: integer
-  sample_start: string
-  timestamp_field?: Field
-}
-
-export interface TextStructureFindMessageStructureRequest extends RequestBase {
-  column_names?: string
-  delimiter?: string
-  ecs_compatibility?: TextStructureEcsCompatibilityType
-  explain?: boolean
-  format?: TextStructureFormatType
-  grok_pattern?: GrokPattern
-  quote?: string
-  should_trim_fields?: boolean
-  timeout?: Duration
-  timestamp_field?: Field
-  timestamp_format?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    messages: string[]
-  }
-}
-
-export interface TextStructureFindMessageStructureResponse {
-  charset: string
-  ecs_compatibility?: TextStructureEcsCompatibilityType
-  field_stats: Record<Field, TextStructureFieldStat>
-  format: TextStructureFormatType
-  grok_pattern?: GrokPattern
-  java_timestamp_formats?: string[]
-  joda_timestamp_formats?: string[]
-  ingest_pipeline: IngestPipelineConfig
-  mappings: MappingTypeMapping
-  multiline_start_pattern?: string
-  need_client_timezone: boolean
-  num_lines_analyzed: integer
-  num_messages_analyzed: integer
-  sample_start: string
-  timestamp_field?: Field
-}
-
-export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
-  charset?: string
-  column_names?: string
-  delimiter?: string
-  ecs_compatibility?: string
-  explain?: boolean
-  format?: string
-  grok_pattern?: GrokPattern
-  has_header_row?: boolean
-  line_merge_size_limit?: uint
-  lines_to_sample?: uint
-  quote?: string
-  should_trim_fields?: boolean
-  timeout?: Duration
-  timestamp_field?: Field
-  timestamp_format?: string
-  /** @deprecated The use of the 'body' key has been deprecated, use 'text_files' instead. */
-  body?: TJsonDocument[]
-}
-
-export interface TextStructureFindStructureResponse {
-  charset: string
-  has_header_row?: boolean
-  has_byte_order_marker: boolean
-  format: string
-  field_stats: Record<Field, TextStructureFieldStat>
-  sample_start: string
-  num_messages_analyzed: integer
-  mappings: MappingTypeMapping
-  quote?: string
-  delimiter?: string
-  need_client_timezone: boolean
-  num_lines_analyzed: integer
-  column_names?: string[]
-  explanation?: string[]
-  grok_pattern?: GrokPattern
-  multiline_start_pattern?: string
-  exclude_lines_pattern?: string
-  java_timestamp_formats?: string[]
-  joda_timestamp_formats?: string[]
-  timestamp_field?: Field
-  should_trim_fields?: boolean
-  ingest_pipeline: IngestPipelineConfig
-}
-
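(Usage sketch, not part of the diff: TextStructureFindStructureRequest via the `text_files` property that the deprecation note above points at; the sample documents are illustrative.)

    const structure = await client.textStructure.findStructure({
      text_files: [
        { message: '2024-01-01T00:00:00Z INFO starting up' },
        { message: '2024-01-01T00:00:01Z WARN low disk space' }
      ]
    })
    // TextStructureFindStructureResponse: detected format plus suggested mappings
    console.log(structure.format, structure.mappings)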
-export interface TextStructureTestGrokPatternMatchedField {
-  match: string
-  offset: integer
-  length: integer
-}
-
-export interface TextStructureTestGrokPatternMatchedText {
-  matched: boolean
-  fields?: Record<string, TextStructureTestGrokPatternMatchedField[]>
-}
-
-export interface TextStructureTestGrokPatternRequest extends RequestBase {
-  ecs_compatibility?: string
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    grok_pattern: GrokPattern
-    text: string[]
-  }
-}
-
-export interface TextStructureTestGrokPatternResponse {
-  matches: TextStructureTestGrokPatternMatchedText[]
-}
-
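(Usage sketch, not part of the diff: TextStructureTestGrokPatternRequest/Response; the pattern and sample text are illustrative.)

    const result = await client.textStructure.testGrokPattern({
      grok_pattern: '%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}',
      text: ['2024-01-01T00:00:00Z INFO starting up']
    })
    for (const match of result.matches) {
      if (match.matched) console.log(match.fields)
    }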
-export interface TransformDestination {
-  index?: IndexName
-  pipeline?: string
-}
-
-export interface TransformLatest {
-  sort: Field
-  unique_key: Field[]
-}
-
-export interface TransformPivot {
-  aggregations?: Record<string, AggregationsAggregationContainer>
-  aggs?: Record<string, AggregationsAggregationContainer>
-  group_by?: Record<string, TransformPivotGroupByContainer>
-}
-
-export interface TransformPivotGroupByContainer {
-  date_histogram?: AggregationsDateHistogramAggregation
-  geotile_grid?: AggregationsGeoTileGridAggregation
-  histogram?: AggregationsHistogramAggregation
-  terms?: AggregationsTermsAggregation
-}
-
-export interface TransformRetentionPolicy {
-  field: Field
-  max_age: Duration
-}
-
-export interface TransformRetentionPolicyContainer {
-  time?: TransformRetentionPolicy
-}
-
-export interface TransformSettings {
-  align_checkpoints?: boolean
-  dates_as_epoch_millis?: boolean
-  deduce_mappings?: boolean
-  docs_per_second?: float
-  max_page_search_size?: integer
-  unattended?: boolean
-}
-
-export interface TransformSource {
-  index: Indices
-  query?: QueryDslQueryContainer
-  runtime_mappings?: MappingRuntimeFields
-}
-
-export interface TransformSyncContainer {
-  time?: TransformTimeSync
-}
-
-export interface TransformTimeSync {
-  delay?: Duration
-  field: Field
-}
-
-export interface TransformDeleteTransformRequest extends RequestBase {
-  transform_id: Id
-  force?: boolean
-  delete_dest_index?: boolean
-  timeout?: Duration
-}
-
-export type TransformDeleteTransformResponse = AcknowledgedResponseBase
-
-export interface TransformGetTransformRequest extends RequestBase {
-  transform_id?: Names
-  allow_no_match?: boolean
-  from?: integer
-  size?: integer
-  exclude_generated?: boolean
-}
-
-export interface TransformGetTransformResponse {
-  count: long
-  transforms: TransformGetTransformTransformSummary[]
-}
-
-export interface TransformGetTransformTransformSummary {
-  authorization?: MlTransformAuthorization
-  create_time?: EpochTime<UnitMillis>
-  create_time_string?: DateTime
-  description?: string
-  dest: ReindexDestination
-  frequency?: Duration
-  id: Id
-  latest?: TransformLatest
-  pivot?: TransformPivot
-  retention_policy?: TransformRetentionPolicyContainer
-  settings?: TransformSettings
-  source: TransformSource
-  sync?: TransformSyncContainer
-  version?: VersionString
-  _meta?: Metadata
-}
-
-export interface TransformGetTransformStatsCheckpointStats {
-  checkpoint: long
-  checkpoint_progress?: TransformGetTransformStatsTransformProgress
-  timestamp?: DateTime
-  timestamp_millis?: EpochTime<UnitMillis>
-  time_upper_bound?: DateTime
-  time_upper_bound_millis?: EpochTime<UnitMillis>
-}
-
-export interface TransformGetTransformStatsCheckpointing {
-  changes_last_detected_at?: long
-  changes_last_detected_at_string?: DateTime
-  last: TransformGetTransformStatsCheckpointStats
-  next?: TransformGetTransformStatsCheckpointStats
-  operations_behind?: long
-  last_search_time?: long
-  last_search_time_string?: DateTime
-}
-
-export interface TransformGetTransformStatsRequest extends RequestBase {
-  transform_id: Names
-  allow_no_match?: boolean
-  from?: long
-  size?: long
-  timeout?: Duration
-}
-
-export interface TransformGetTransformStatsResponse {
-  count: long
-  transforms: TransformGetTransformStatsTransformStats[]
-}
-
-export interface TransformGetTransformStatsTransformHealthIssue {
-  type: string
-  issue: string
-  details?: string
-  count: integer
-  first_occurrence?: EpochTime<UnitMillis>
-  first_occurence_string?: DateTime
-}
-
-export interface TransformGetTransformStatsTransformIndexerStats {
-  delete_time_in_ms?: EpochTime<UnitMillis>
-  documents_indexed: long
-  documents_deleted?: long
-  documents_processed: long
-  exponential_avg_checkpoint_duration_ms: DurationValue<UnitMillis>
-  exponential_avg_documents_indexed: double
-  exponential_avg_documents_processed: double
-  index_failures: long
-  index_time_in_ms: DurationValue<UnitMillis>
-  index_total: long
-  pages_processed: long
-  processing_time_in_ms: DurationValue<UnitMillis>
-  processing_total: long
-  search_failures: long
-  search_time_in_ms: DurationValue<UnitMillis>
-  search_total: long
-  trigger_count: long
-}
-
-export interface TransformGetTransformStatsTransformProgress {
-  docs_indexed: long
-  docs_processed: long
-  docs_remaining?: long
-  percent_complete?: double
-  total_docs?: long
-}
-
-export interface TransformGetTransformStatsTransformStats {
-  checkpointing: TransformGetTransformStatsCheckpointing
-  health?: TransformGetTransformStatsTransformStatsHealth
-  id: Id
-  node?: NodeAttributes
-  reason?: string
-  state: string
-  stats: TransformGetTransformStatsTransformIndexerStats
-}
-
-export interface TransformGetTransformStatsTransformStatsHealth {
-  status: HealthStatus
-  issues?: TransformGetTransformStatsTransformHealthIssue[]
-}
-
-export interface TransformPreviewTransformRequest extends RequestBase {
-  transform_id?: Id
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest?: TransformDestination
-    description?: string
-    frequency?: Duration
-    pivot?: TransformPivot
-    source?: TransformSource
-    settings?: TransformSettings
-    sync?: TransformSyncContainer
-    retention_policy?: TransformRetentionPolicyContainer
-    latest?: TransformLatest
-  }
-}
-
-export interface TransformPreviewTransformResponse<TTransform = unknown> {
-  generated_dest_index: IndicesIndexState
-  preview: TTransform[]
-}
-
-export interface TransformPutTransformRequest extends RequestBase {
-  transform_id: Id
-  defer_validation?: boolean
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest: TransformDestination
-    description?: string
-    frequency?: Duration
-    latest?: TransformLatest
-    _meta?: Metadata
-    pivot?: TransformPivot
-    retention_policy?: TransformRetentionPolicyContainer
-    settings?: TransformSettings
-    source: TransformSource
-    sync?: TransformSyncContainer
-  }
-}
-
-export type TransformPutTransformResponse = AcknowledgedResponseBase
-
-export interface TransformResetTransformRequest extends RequestBase {
-  transform_id: Id
-  force?: boolean
-  timeout?: Duration
-}
-
-export type TransformResetTransformResponse = AcknowledgedResponseBase
-
-export interface TransformScheduleNowTransformRequest extends RequestBase {
-  transform_id: Id
-  timeout?: Duration
-}
-
-export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase
-
-export interface TransformStartTransformRequest extends RequestBase {
-  transform_id: Id
-  timeout?: Duration
-  from?: string
-}
-
-export type TransformStartTransformResponse = AcknowledgedResponseBase
-
-export interface TransformStopTransformRequest extends RequestBase {
-  transform_id: Name
-  allow_no_match?: boolean
-  force?: boolean
-  timeout?: Duration
-  wait_for_checkpoint?: boolean
-  wait_for_completion?: boolean
-}
-
-export type TransformStopTransformResponse = AcknowledgedResponseBase
-
-export interface TransformUpdateTransformRequest extends RequestBase {
-  transform_id: Id
-  defer_validation?: boolean
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    dest?: TransformDestination
-    description?: string
-    frequency?: Duration
-    _meta?: Metadata
-    source?: TransformSource
-    settings?: TransformSettings
-    sync?: TransformSyncContainer
-    retention_policy?: TransformRetentionPolicyContainer | null
-  }
-}
-
-export interface TransformUpdateTransformResponse {
-  authorization?: MlTransformAuthorization
-  create_time: long
-  description: string
-  dest: ReindexDestination
-  frequency?: Duration
-  id: Id
-  latest?: TransformLatest
-  pivot?: TransformPivot
-  retention_policy?: TransformRetentionPolicyContainer
-  settings: TransformSettings
-  source: ReindexSource
-  sync?: TransformSyncContainer
-  version: VersionString
-  _meta?: Metadata
-}
-
-export interface TransformUpgradeTransformsRequest extends RequestBase {
-  dry_run?: boolean
-  timeout?: Duration
-}
-
-export interface TransformUpgradeTransformsResponse {
-  needs_update: integer
-  no_action: integer
-  updated: integer
-}
-
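(Usage sketch, not part of the diff: a pivot transform built from the types above, with properties at the top level; the ids, fields, and indices are illustrative.)

    // TransformPutTransformRequest
    await client.transform.putTransform({
      transform_id: 'orders-by-customer',
      source: { index: 'orders' },
      dest: { index: 'orders-summary' },
      pivot: {
        group_by: { customer: { terms: { field: 'customer_id' } } },
        aggregations: { total: { sum: { field: 'amount' } } }
      },
      sync: { time: { field: '@timestamp', delay: '60s' } }
    })
    await client.transform.startTransform({ transform_id: 'orders-by-customer' })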
-export interface WatcherAcknowledgeState {
-  state: WatcherAcknowledgementOptions
-  timestamp: DateTime
-}
-
-export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked'
-
-export interface WatcherAction {
-  action_type?: WatcherActionType
-  condition?: WatcherConditionContainer
-  foreach?: string
-  max_iterations?: integer
-  name?: Name
-  throttle_period?: Duration
-  throttle_period_in_millis?: DurationValue<UnitMillis>
-  transform?: TransformContainer
-  index?: WatcherIndexAction
-  logging?: WatcherLoggingAction
-  email?: WatcherEmailAction
-  pagerduty?: WatcherPagerDutyAction
-  slack?: WatcherSlackAction
-  webhook?: WatcherWebhookAction
-}
-
-export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip'
-
-export interface WatcherActionStatus {
-  ack: WatcherAcknowledgeState
-  last_execution?: WatcherExecutionState
-  last_successful_execution?: WatcherExecutionState
-  last_throttle?: WatcherThrottleState
-}
-
-export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled'
-
-export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty'
-
-export type WatcherActions = Record<IndexName, WatcherActionStatus>
-
-export interface WatcherActivationState {
-  active: boolean
-  timestamp: DateTime
-}
-
-export interface WatcherActivationStatus {
-  actions: WatcherActions
-  state: WatcherActivationState
-  version: VersionNumber
-}
-
-export interface WatcherAlwaysCondition {
-}
-
-export interface WatcherArrayCompareConditionKeys {
-  path: string
-}
-export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys
-& { [property: string]: WatcherArrayCompareOpParams | string }
-
-export interface WatcherArrayCompareOpParams {
-  quantifier: WatcherQuantifier
-  value: FieldValue
-}
-
-export interface WatcherChainInput {
-  inputs: Partial<Record<string, WatcherInputContainer>>[]
-}
-
-export interface WatcherConditionContainer {
-  always?: WatcherAlwaysCondition
-  array_compare?: Partial<Record<string, WatcherArrayCompareCondition>>
-  compare?: Partial<Record<string, Partial<Record<WatcherConditionOp, FieldValue>>>>
-  never?: WatcherNeverCondition
-  script?: WatcherScriptCondition
-}
-
-export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'
-
-export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'
-
-export type WatcherConnectionScheme = 'http' | 'https'
-
-export type WatcherCronExpression = string
-
-export interface WatcherDailySchedule {
-  at: WatcherScheduleTimeOfDay[]
-}
-
-export type WatcherDataAttachmentFormat = 'json' | 'yaml'
-
-export interface WatcherDataEmailAttachment {
-  format?: WatcherDataAttachmentFormat
-}
-
-export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'
-
-export interface WatcherEmail {
-  id?: Id
-  bcc?: string | string[]
-  body?: WatcherEmailBody
-  cc?: string | string[]
-  from?: string
-  priority?: WatcherEmailPriority
-  reply_to?: string | string[]
-  sent_date?: DateTime
-  subject: string
-  to: string | string[]
-  attachments?: Record<string, WatcherEmailAttachmentContainer>
-}
-
-export interface WatcherEmailAction extends WatcherEmail {
-}
-
-export interface WatcherEmailAttachmentContainer {
-  http?: WatcherHttpEmailAttachment
-  reporting?: WatcherReportingEmailAttachment
-  data?: WatcherDataEmailAttachment
-}
-
-export interface WatcherEmailBody {
-  html?: string
-  text?: string
-}
-
-export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest'
-
-export interface WatcherEmailResult {
-  account?: string
-  message: WatcherEmail
-  reason?: string
-}
-
-export type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished'
-
-export interface WatcherExecutionResult {
-  actions: WatcherExecutionResultAction[]
-  condition: WatcherExecutionResultCondition
-  execution_duration: DurationValue<UnitMillis>
-  execution_time: DateTime
-  input: WatcherExecutionResultInput
-}
-
-export interface WatcherExecutionResultAction {
-  email?: WatcherEmailResult
-  id: Id
-  index?: WatcherIndexResult
-  logging?: WatcherLoggingResult
-  pagerduty?: WatcherPagerDutyResult
-  reason?: string
-  slack?: WatcherSlackResult
-  status: WatcherActionStatusOptions
-  type: WatcherActionType
-  webhook?: WatcherWebhookResult
-  error?: ErrorCause
-}
-
-export interface WatcherExecutionResultCondition {
-  met: boolean
-  status: WatcherActionStatusOptions
-  type: WatcherConditionType
-}
-
-export interface WatcherExecutionResultInput {
-  payload: Record<string, any>
-  status: WatcherActionStatusOptions
-  type: WatcherInputType
-}
-
-export interface WatcherExecutionState {
-  successful: boolean
-  timestamp: DateTime
-  reason?: string
-}
-
-export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued'
-
-export interface WatcherExecutionThreadPool {
-  max_size: long
-  queue_size: long
-}
-
-export interface WatcherHourAndMinute {
-  hour: integer[]
-  minute: integer[]
-}
-
-export interface WatcherHourlySchedule {
-  minute: integer[]
-}
-
-export interface WatcherHttpEmailAttachment {
-  content_type?: string
-  inline?: boolean
-  request?: WatcherHttpInputRequestDefinition
-}
-
-export interface WatcherHttpInput {
-  extract?: string[]
-  request?: WatcherHttpInputRequestDefinition
-  response_content_type?: WatcherResponseContentType
-}
-
-export interface WatcherHttpInputAuthentication {
-  basic: WatcherHttpInputBasicAuthentication
-}
-
-export interface WatcherHttpInputBasicAuthentication {
-  password: Password
-  username: Username
-}
-
-export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete'
-
-export interface WatcherHttpInputProxy {
-  host: Host
-  port: uint
-}
-
-export interface WatcherHttpInputRequestDefinition {
-  auth?: WatcherHttpInputAuthentication
-  body?: string
-  connection_timeout?: Duration
-  headers?: Record<string, string>
-  host?: Host
-  method?: WatcherHttpInputMethod
-  params?: Record<string, string>
-  path?: string
-  port?: uint
-  proxy?: WatcherHttpInputProxy
-  read_timeout?: Duration
-  scheme?: WatcherConnectionScheme
-  url?: string
-}
-
-export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition {
-}
-
-export interface WatcherHttpInputResponseResult {
-  body: string
-  headers: HttpHeaders
-  status: integer
-}
-
-export interface WatcherIndexAction {
-  index: IndexName
-  doc_id?: Id
-  refresh?: Refresh
-  op_type?: OpType
-  timeout?: Duration
-  execution_time_field?: Field
-}
-
-export interface WatcherIndexResult {
-  response: WatcherIndexResultSummary
-}
-
-export interface WatcherIndexResultSummary {
-  created: boolean
-  id: Id
-  index: IndexName
-  result: Result
-  version: VersionNumber
-}
-
-export interface WatcherInputContainer {
-  chain?: WatcherChainInput
-  http?: WatcherHttpInput
-  search?: WatcherSearchInput
-  simple?: Record<string, any>
-}
-
-export type WatcherInputType = 'http' | 'search' | 'simple'
-
-export interface WatcherLoggingAction {
-  level?: string
-  text: string
-  category?: string
-}
-
-export interface WatcherLoggingResult {
-  logged_text: string
-}
-
-export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december'
-
-export interface WatcherNeverCondition {
-}
-
-export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent {
-}
-
-export interface WatcherPagerDutyContext {
-  href?: string
-  src?: string
-  type: WatcherPagerDutyContextType
-}
-
-export type WatcherPagerDutyContextType = 'link' | 'image'
-
-export interface WatcherPagerDutyEvent {
-  account?: string
-  attach_payload: boolean
-  client?: string
-  client_url?: string
-  contexts?: WatcherPagerDutyContext[]
-  context?: WatcherPagerDutyContext[]
-  description: string
-  event_type?: WatcherPagerDutyEventType
-  incident_key: string
-  proxy?: WatcherPagerDutyEventProxy
-}
-
-export interface WatcherPagerDutyEventProxy {
-  host?: Host
-  port?: integer
-}
-
-export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge'
-
-export interface WatcherPagerDutyResult {
-  event: WatcherPagerDutyEvent
-  reason?: string
-  request?: WatcherHttpInputRequestResult
-  response?: WatcherHttpInputResponseResult
-}
-
-export type WatcherQuantifier = 'some' | 'all'
-
-export interface WatcherQueryWatch {
-  _id: Id
-  status?: WatcherWatchStatus
-  watch?: WatcherWatch
-  _primary_term?: integer
-  _seq_no?: SequenceNumber
-}
-
-export interface WatcherReportingEmailAttachment {
-  url: string
-  inline?: boolean
-  retries?: integer
-  interval?: Duration
-  request?: WatcherHttpInputRequestDefinition
-}
-
-export type WatcherResponseContentType = 'json' | 'yaml' | 'text'
-
-export interface WatcherScheduleContainer {
-  timezone?: string
-  cron?: WatcherCronExpression
-  daily?: WatcherDailySchedule
-  hourly?: WatcherHourlySchedule
-  interval?: Duration
-  monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[]
-  weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[]
-  yearly?: WatcherTimeOfYear | WatcherTimeOfYear[]
-}
-
-export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute
-
-export interface WatcherScheduleTriggerEvent {
-  scheduled_time: DateTime
-  triggered_time?: DateTime
-}
-
-export interface WatcherScriptCondition {
-  lang?: string
-  params?: Record<string, any>
-  source?: string
-  id?: string
-}
-
-export interface WatcherSearchInput {
-  extract?: string[]
-  request: WatcherSearchInputRequestDefinition
-  timeout?: Duration
-}
-
-export interface WatcherSearchInputRequestBody {
-  query: QueryDslQueryContainer
-}
-
-export interface WatcherSearchInputRequestDefinition {
-  body?: WatcherSearchInputRequestBody
-  indices?: IndexName[]
-  indices_options?: IndicesOptions
-  search_type?: SearchType
-  template?: WatcherSearchTemplateRequestBody
-  rest_total_hits_as_int?: boolean
-}
-
-export interface WatcherSearchTemplateRequestBody {
-  explain?: boolean
-  id?: Id
-  params?: Record<string, any>
-  profile?: boolean
-  source?: string
-}
-
-export interface WatcherSimulatedActions {
-  actions: string[]
-  all: WatcherSimulatedActions
-  use_all: boolean
-}
-
-export interface WatcherSlackAction {
-  account?: string
-  message: WatcherSlackMessage
-}
-
-export interface WatcherSlackAttachment {
-  author_icon?: string
-  author_link?: string
-  author_name: string
-  color?: string
-  fallback?: string
-  fields?: WatcherSlackAttachmentField[]
-  footer?: string
-  footer_icon?: string
-  image_url?: string
-  pretext?: string
-  text?: string
-  thumb_url?: string
-  title: string
-  title_link?: string
-  ts?: EpochTime<UnitSeconds>
-}
-
-export interface WatcherSlackAttachmentField {
-  short: boolean
-  title: string
-  value: string
-}
-
-export interface WatcherSlackDynamicAttachment {
-  attachment_template: WatcherSlackAttachment
-  list_path: string
-}
-
-export interface WatcherSlackMessage {
-  attachments: WatcherSlackAttachment[]
-  dynamic_attachments?: WatcherSlackDynamicAttachment
-  from: string
-  icon?: string
-  text: string
-  to: string[]
-}
-
-export interface WatcherSlackResult {
-  account?: string
-  message: WatcherSlackMessage
-}
-
-export interface WatcherThrottleState {
-  reason: string
-  timestamp: DateTime
-}
-
-export interface WatcherTimeOfMonth {
-  at: string[]
-  on: integer[]
-}
-
-export interface WatcherTimeOfWeek {
-  at: string[]
-  on: WatcherDay[]
-}
-
-export interface WatcherTimeOfYear {
-  at: string[]
-  int: WatcherMonth[]
-  on: integer[]
-}
-
-export interface WatcherTriggerContainer {
-  schedule?: WatcherScheduleContainer
-}
-
-export interface WatcherTriggerEventContainer {
-  schedule?: WatcherScheduleTriggerEvent
-}
-
-export interface WatcherTriggerEventResult {
-  manual: WatcherTriggerEventContainer
-  triggered_time: DateTime
-  type: string
-}
-
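(Usage sketch, not part of the diff: a WatcherTriggerContainer literal showing how the schedule shapes above compose; assumes the types are consumed via the client's `estypes` export.)

    import type { estypes } from '@elastic/elasticsearch'

    // Fires Mondays and Fridays at noon (WatcherTimeOfWeek inside WatcherScheduleContainer)
    const trigger: estypes.WatcherTriggerContainer = {
      schedule: {
        weekly: [{ on: ['monday', 'friday'], at: ['12:00'] }]
      }
    }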
-export interface WatcherWatch {
-  actions: Record<IndexName, WatcherAction>
-  condition: WatcherConditionContainer
-  input: WatcherInputContainer
-  metadata?: Metadata
-  status?: WatcherWatchStatus
-  throttle_period?: Duration
-  throttle_period_in_millis?: DurationValue<UnitMillis>
-  transform?: TransformContainer
-  trigger: WatcherTriggerContainer
-}
-
-export interface WatcherWatchStatus {
-  actions: WatcherActions
-  last_checked?: DateTime
-  last_met_condition?: DateTime
-  state: WatcherActivationState
-  version: VersionNumber
-  execution_state?: string
-}
-
-export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition {
-}
-
-export interface WatcherWebhookResult {
-  request: WatcherHttpInputRequestResult
-  response?: WatcherHttpInputResponseResult
-}
-
-export interface WatcherAckWatchRequest extends RequestBase {
-  watch_id: Name
-  action_id?: Names
-}
-
-export interface WatcherAckWatchResponse {
-  status: WatcherWatchStatus
-}
-
-export interface WatcherActivateWatchRequest extends RequestBase {
-  watch_id: Name
-}
-
-export interface WatcherActivateWatchResponse {
-  status: WatcherActivationStatus
-}
-
-export interface WatcherDeactivateWatchRequest extends RequestBase {
-  watch_id: Name
-}
-
-export interface WatcherDeactivateWatchResponse {
-  status: WatcherActivationStatus
-}
-
-export interface WatcherDeleteWatchRequest extends RequestBase {
-  id: Name
-}
-
-export interface WatcherDeleteWatchResponse {
-  found: boolean
-  _id: Id
-  _version: VersionNumber
-}
-
-export interface WatcherExecuteWatchRequest extends RequestBase {
-  id?: Id
-  debug?: boolean
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    action_modes?: Record<string, WatcherActionExecutionMode>
-    alternative_input?: Record<string, any>
-    ignore_condition?: boolean
-    record_execution?: boolean
-    simulated_actions?: WatcherSimulatedActions
-    trigger_data?: WatcherScheduleTriggerEvent
-    watch?: WatcherWatch
-  }
-}
-
-export interface WatcherExecuteWatchResponse {
-  _id: Id
-  watch_record: WatcherExecuteWatchWatchRecord
-}
-
-export interface WatcherExecuteWatchWatchRecord {
-  condition: WatcherConditionContainer
-  input: WatcherInputContainer
-  messages: string[]
-  metadata?: Metadata
-  node: string
-  result: WatcherExecutionResult
-  state: WatcherExecutionStatus
-  trigger_event: WatcherTriggerEventResult
-  user: Username
-  watch_id: Id
-  status?: WatcherWatchStatus
-}
-
-export interface WatcherGetSettingsRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export interface WatcherGetSettingsResponse {
-  index: IndicesIndexSettings
-}
-
-export interface WatcherGetWatchRequest extends RequestBase {
-  id: Name
-}
-
-export interface WatcherGetWatchResponse {
-  found: boolean
-  _id: Id
-  status?: WatcherWatchStatus
-  watch?: WatcherWatch
-  _primary_term?: integer
-  _seq_no?: SequenceNumber
-  _version?: VersionNumber
-}
-
-export interface WatcherPutWatchRequest extends RequestBase {
-  id: Id
-  active?: boolean
-  if_primary_term?: long
-  if_seq_no?: SequenceNumber
-  version?: VersionNumber
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    actions?: Record<string, WatcherAction>
-    condition?: WatcherConditionContainer
-    input?: WatcherInputContainer
-    metadata?: Metadata
-    throttle_period?: Duration
-    throttle_period_in_millis?: DurationValue<UnitMillis>
-    transform?: TransformContainer
-    trigger?: WatcherTriggerContainer
-  }
-}
-
-export interface WatcherPutWatchResponse {
-  created: boolean
-  _id: Id
-  _primary_term: long
-  _seq_no: SequenceNumber
-  _version: VersionNumber
-}
-
-export interface WatcherQueryWatchesRequest extends RequestBase {
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    from?: integer
-    size?: integer
-    query?: QueryDslQueryContainer
-    sort?: Sort
-    search_after?: SortResults
-  }
-}
-
-export interface WatcherQueryWatchesResponse {
-  count: integer
-  watches: WatcherQueryWatch[]
-}
-
-export interface WatcherStartRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export type WatcherStartResponse = AcknowledgedResponseBase
-
-export interface WatcherStatsRequest extends RequestBase {
-  metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[]
-  emit_stacktraces?: boolean
-}
-
-export interface WatcherStatsResponse {
-  _nodes: NodeStatistics
-  cluster_name: Name
-  manually_stopped: boolean
-  stats: WatcherStatsWatcherNodeStats[]
-}
-
-export interface WatcherStatsWatchRecordQueuedStats {
-  execution_time: DateTime
-}
-
-export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats {
-  execution_phase: WatcherExecutionPhase
-  triggered_time: DateTime
-  executed_actions?: string[]
-  watch_id: Id
-  watch_record_id: Id
-}
-
-export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches'
-
-export interface WatcherStatsWatcherNodeStats {
-  current_watches?: WatcherStatsWatchRecordStats[]
-  execution_thread_pool: WatcherExecutionThreadPool
-  queued_watches?: WatcherStatsWatchRecordQueuedStats[]
-  watch_count: long
-  watcher_state: WatcherStatsWatcherState
-  node_id: Id
-}
-
-export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping'
-
-export interface WatcherStopRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export type WatcherStopResponse = AcknowledgedResponseBase
-
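(Usage sketch, not part of the diff: WatcherPutWatchRequest with the watch definition at the top level; the watch id, index pattern, and threshold are illustrative.)

    await client.watcher.putWatch({
      id: 'errors-watch',
      trigger: { schedule: { interval: '10m' } },
      input: {
        search: {
          request: { indices: ['logs-*'], body: { query: { match: { level: 'error' } } } }
        }
      },
      // WatcherConditionContainer.compare: path -> { op: value }
      condition: { compare: { 'ctx.payload.hits.total': { gt: 0 } } },
      actions: {
        log_errors: { logging: { text: 'errors seen in the last 10 minutes' } }
      }
    })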
-export interface WatcherUpdateSettingsRequest extends RequestBase {
-  master_timeout?: Duration
-  timeout?: Duration
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    'index.auto_expand_replicas'?: string
-    'index.number_of_replicas'?: integer
-  }
-}
-
-export interface WatcherUpdateSettingsResponse {
-  acknowledged: boolean
-}
-
-export interface XpackInfoBuildInformation {
-  date: DateTime
-  hash: string
-}
-
-export interface XpackInfoFeature {
-  available: boolean
-  description?: string
-  enabled: boolean
-  native_code_info?: XpackInfoNativeCodeInformation
-}
-
-export interface XpackInfoFeatures {
-  aggregate_metric: XpackInfoFeature
-  analytics: XpackInfoFeature
-  ccr: XpackInfoFeature
-  data_streams: XpackInfoFeature
-  data_tiers: XpackInfoFeature
-  enrich: XpackInfoFeature
-  enterprise_search: XpackInfoFeature
-  eql: XpackInfoFeature
-  esql: XpackInfoFeature
-  frozen_indices: XpackInfoFeature
-  graph: XpackInfoFeature
-  ilm: XpackInfoFeature
-  logstash: XpackInfoFeature
-  logsdb: XpackInfoFeature
-  ml: XpackInfoFeature
-  monitoring: XpackInfoFeature
-  rollup: XpackInfoFeature
-  runtime_fields?: XpackInfoFeature
-  searchable_snapshots: XpackInfoFeature
-  security: XpackInfoFeature
-  slm: XpackInfoFeature
-  spatial: XpackInfoFeature
-  sql: XpackInfoFeature
-  transform: XpackInfoFeature
-  universal_profiling: XpackInfoFeature
-  voting_only: XpackInfoFeature
-  watcher: XpackInfoFeature
-  archive: XpackInfoFeature
-}
-
-export interface XpackInfoMinimalLicenseInformation {
-  expiry_date_in_millis: EpochTime<UnitMillis>
-  mode: LicenseLicenseType
-  status: LicenseLicenseStatus
-  type: LicenseLicenseType
-  uid: string
-}
-
-export interface XpackInfoNativeCodeInformation {
-  build_hash: string
-  version: VersionString
-}
-
-export interface XpackInfoRequest extends RequestBase {
-  categories?: XpackInfoXPackCategory[]
-  accept_enterprise?: boolean
-  human?: boolean
-}
-
-export interface XpackInfoResponse {
-  build: XpackInfoBuildInformation
-  features: XpackInfoFeatures
-  license: XpackInfoMinimalLicenseInformation
-  tagline: string
-}
-
-export type XpackInfoXPackCategory = 'build' | 'features' | 'license'
-
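(Usage sketch, not part of the diff: XpackInfoRequest/Response for checking which licensed features are enabled.)

    const info = await client.xpack.info({ categories: ['build', 'features', 'license'] })
    console.log(info.license.type, info.features.watcher.enabled)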
-export interface XpackUsageAnalytics extends XpackUsageBase {
-  stats: XpackUsageAnalyticsStatistics
-}
-
-export interface XpackUsageAnalyticsStatistics {
-  boxplot_usage: long
-  cumulative_cardinality_usage: long
-  string_stats_usage: long
-  top_metrics_usage: long
-  t_test_usage: long
-  moving_percentiles_usage: long
-  normalize_usage: long
-  rate_usage: long
-  multi_terms_usage?: long
-}
-
-export interface XpackUsageArchive extends XpackUsageBase {
-  indices_count: long
-}
-
-export interface XpackUsageAudit extends XpackUsageFeatureToggle {
-  outputs?: string[]
-}
-
-export interface XpackUsageBase {
-  available: boolean
-  enabled: boolean
-}
-
-export interface XpackUsageCcr extends XpackUsageBase {
-  auto_follow_patterns_count: integer
-  follower_indices_count: integer
-}
-
-export interface XpackUsageCounter {
-  active: long
-  total: long
-}
-
-export interface XpackUsageDataStreams extends XpackUsageBase {
-  data_streams: long
-  indices_count: long
-}
-
-export interface XpackUsageDataTierPhaseStatistics {
-  node_count: long
-  index_count: long
-  total_shard_count: long
-  primary_shard_count: long
-  doc_count: long
-  total_size_bytes: long
-  primary_size_bytes: long
-  primary_shard_size_avg_bytes: long
-  primary_shard_size_median_bytes: long
-  primary_shard_size_mad_bytes: long
-}
-
-export interface XpackUsageDataTiers extends XpackUsageBase {
-  data_warm: XpackUsageDataTierPhaseStatistics
-  data_frozen?: XpackUsageDataTierPhaseStatistics
-  data_cold: XpackUsageDataTierPhaseStatistics
-  data_content: XpackUsageDataTierPhaseStatistics
-  data_hot: XpackUsageDataTierPhaseStatistics
-}
-
-export interface XpackUsageDatafeed {
-  count: long
-}
-
-export interface XpackUsageEql extends XpackUsageBase {
-  features: XpackUsageEqlFeatures
-  queries: Record<string, XpackUsageQuery>
-}
-
-export interface XpackUsageEqlFeatures {
-  join: uint
-  joins: XpackUsageEqlFeaturesJoin
-  keys: XpackUsageEqlFeaturesKeys
-  event: uint
-  pipes: XpackUsageEqlFeaturesPipes
-  sequence: uint
-  sequences: XpackUsageEqlFeaturesSequences
-}
-
-export interface XpackUsageEqlFeaturesJoin {
-  join_queries_two: uint
-  join_queries_three: uint
-  join_until: uint
-  join_queries_five_or_more: uint
-  join_queries_four: uint
-}
-
-export interface XpackUsageEqlFeaturesKeys {
-  join_keys_two: uint
-  join_keys_one: uint
-  join_keys_three: uint
-  join_keys_five_or_more: uint
-  join_keys_four: uint
-}
-
-export interface XpackUsageEqlFeaturesPipes {
-  pipe_tail: uint
-  pipe_head: uint
-}
-
-export interface XpackUsageEqlFeaturesSequences {
-  sequence_queries_three: uint
-  sequence_queries_four: uint
-  sequence_queries_two: uint
-  sequence_until: uint
-  sequence_queries_five_or_more: uint
-  sequence_maxspan: uint
-}
-
-export interface XpackUsageFeatureToggle {
-  enabled: boolean
-}
-
-export interface XpackUsageFlattened extends XpackUsageBase {
-  field_count: integer
-}
-
-export interface XpackUsageFrozenIndices extends XpackUsageBase {
-  indices_count: long
-}
-
-export interface XpackUsageHealthStatistics extends XpackUsageBase {
-  invocations: XpackUsageInvocations
-}
-
-export interface XpackUsageIlm {
-  policy_count: integer
-  policy_stats: XpackUsageIlmPolicyStatistics[]
-}
-
-export interface XpackUsageIlmPolicyStatistics {
-  indices_managed: integer
-  phases: XpackUsagePhases
-}
-
-export interface XpackUsageInvocations {
-  total: long
-}
-
-export interface XpackUsageIpFilter {
-  http: boolean
-  transport: boolean
-}
-
-export interface XpackUsageJobUsage {
-  count: integer
-  created_by: Record<string, long>
-  detectors: MlJobStatistics
-  forecasts: XpackUsageMlJobForecasts
-  model_size: MlJobStatistics
-}
-
-export interface XpackUsageMachineLearning extends XpackUsageBase {
-  datafeeds: Record<string, XpackUsageDatafeed>
-  jobs: Record<string, XpackUsageJobUsage>
-  node_count: integer
-  data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs
-  inference: XpackUsageMlInference
-}
-
-export interface XpackUsageMlCounter {
-  count: long
-}
-
-export interface XpackUsageMlDataFrameAnalyticsJobs {
-  memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory
-  _all: XpackUsageMlDataFrameAnalyticsJobsCount
-  analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis
-  stopped?: XpackUsageMlDataFrameAnalyticsJobsCount
-}
-
-export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis {
-  classification?: integer
-  outlier_detection?: integer
-  regression?: integer
-}
-
-export interface XpackUsageMlDataFrameAnalyticsJobsCount {
-  count: long
-}
-
-export interface XpackUsageMlDataFrameAnalyticsJobsMemory {
-  peak_usage_bytes: MlJobStatistics
-}
-
-export interface XpackUsageMlInference {
-  ingest_processors: Record<string, XpackUsageMlInferenceIngestProcessor>
-  trained_models: XpackUsageMlInferenceTrainedModels
-  deployments?: XpackUsageMlInferenceDeployments
-}
-
-export interface XpackUsageMlInferenceDeployments {
-  count: integer
-  inference_counts: MlJobStatistics
-  model_sizes_bytes: MlJobStatistics
-  time_ms: XpackUsageMlInferenceDeploymentsTimeMs
-}
-
-export interface XpackUsageMlInferenceDeploymentsTimeMs {
-  avg: double
-}
-
-export interface XpackUsageMlInferenceIngestProcessor {
-  num_docs_processed: XpackUsageMlInferenceIngestProcessorCount
-  pipelines: XpackUsageMlCounter
-  num_failures: XpackUsageMlInferenceIngestProcessorCount
-  time_ms: XpackUsageMlInferenceIngestProcessorCount
-}
-
-export interface XpackUsageMlInferenceIngestProcessorCount {
-  max: long
-  sum: long
-  min: long
-}
-
-export interface XpackUsageMlInferenceTrainedModels {
-  estimated_operations?: MlJobStatistics
-  estimated_heap_memory_usage_bytes?: MlJobStatistics
-  count?: XpackUsageMlInferenceTrainedModelsCount
-  _all: XpackUsageMlCounter
-  model_size_bytes?: MlJobStatistics
-}
-
-export interface XpackUsageMlInferenceTrainedModelsCount {
-  total: long
-  prepackaged: long
-  other: long
-  pass_through?: long
-  regression?: long
-  classification?: long
-  ner?: long
-  text_embedding?: long
-}
-
-export interface XpackUsageMlJobForecasts {
-  total: long
-  forecasted_jobs: long
-}
-
-export interface XpackUsageMonitoring extends XpackUsageBase {
-  collection_enabled: boolean
-  enabled_exporters: Record<string, long>
-}
-
-export interface XpackUsagePhase {
-  actions: string[]
-  min_age: DurationValue<UnitMillis>
-}
-
-export interface XpackUsagePhases {
-  cold?: XpackUsagePhase
-  delete?: XpackUsagePhase
-  frozen?: XpackUsagePhase
-  hot?: XpackUsagePhase
-  warm?: XpackUsagePhase
-}
-
-export interface XpackUsageQuery {
-  count?: integer
-  failed?: integer
-  paging?: integer
-  total?: integer
-}
-
-export interface XpackUsageRealm extends XpackUsageBase {
-  name?: string[]
-  order?: long[]
-  size?: long[]
-  cache?: XpackUsageRealmCache[]
-  has_authorization_realms?: boolean[]
-  has_default_username_pattern?: boolean[]
-  has_truststore?: boolean[]
-  is_authentication_delegated?: boolean[]
-}
-
-export interface XpackUsageRealmCache {
-  size: long
-}
-
-export interface XpackUsageRequest extends RequestBase {
-  master_timeout?: Duration
-}
-
-export interface XpackUsageResponse {
-  aggregate_metric: XpackUsageBase
-  analytics: XpackUsageAnalytics
-  archive: XpackUsageArchive
-  watcher: XpackUsageWatcher
-  ccr: XpackUsageCcr
-  data_frame?: XpackUsageBase
-  data_science?: XpackUsageBase
-  data_streams?: XpackUsageDataStreams
-  data_tiers: XpackUsageDataTiers
-  enrich?: XpackUsageBase
-  eql: XpackUsageEql
-  flattened?: XpackUsageFlattened
-  frozen_indices: XpackUsageFrozenIndices
-  graph: XpackUsageBase
-  health_api?: XpackUsageHealthStatistics
-  ilm: XpackUsageIlm
-  logstash: XpackUsageBase
-  ml: XpackUsageMachineLearning
-  monitoring: XpackUsageMonitoring
-  rollup: XpackUsageBase
-  runtime_fields?: XpackUsageRuntimeFieldTypes
-  spatial: XpackUsageBase
-  searchable_snapshots: XpackUsageSearchableSnapshots
-  security: XpackUsageSecurity
-  slm: XpackUsageSlm
-  sql: XpackUsageSql
-  transform: XpackUsageBase
-  vectors?: XpackUsageVector
-  voting_only: XpackUsageBase
-}
-
-export interface XpackUsageRoleMapping {
-  enabled: integer
-  size: integer
-}
-
-export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase {
-  field_types: XpackUsageRuntimeFieldsType[]
-}
-
-export interface XpackUsageRuntimeFieldsType {
-  chars_max: long
-  chars_total: long
-  count: long
-  doc_max: long
-  doc_total: long
-  index_count: long
-  lang: string[]
-  lines_max: long
-  lines_total: long
-  name: Field
-  scriptless_count: long
-  shadowed_count: long
-  source_max: long
-  source_total: long
-}
-
-export interface XpackUsageSearchableSnapshots extends XpackUsageBase {
-  indices_count: integer
-  full_copy_indices_count?: integer
-  shared_cache_indices_count?: integer
-}
-
-export interface XpackUsageSecurity extends XpackUsageBase {
-  api_key_service: XpackUsageFeatureToggle
-  anonymous: XpackUsageFeatureToggle
-  audit: XpackUsageAudit
-  fips_140: XpackUsageFeatureToggle
-  ipfilter: XpackUsageIpFilter
-  realms: Record<string, XpackUsageRealm>
-  role_mapping: Record<string, XpackUsageRoleMapping>
-  roles: XpackUsageSecurityRoles
-  ssl: XpackUsageSsl
-  system_key?: XpackUsageFeatureToggle
-  token_service: XpackUsageFeatureToggle
-  operator_privileges: XpackUsageBase
-}
-
-export interface XpackUsageSecurityRoles {
-  native: XpackUsageSecurityRolesNative
-  dls: XpackUsageSecurityRolesDls
-  file: XpackUsageSecurityRolesFile
-}
-
-export interface XpackUsageSecurityRolesDls {
-  bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache
-}
-
-export interface XpackUsageSecurityRolesDlsBitSetCache {
-  count: integer
-  memory?: ByteSize
-  memory_in_bytes: ulong
-}
-
-export interface XpackUsageSecurityRolesFile {
-  dls: boolean
-  fls: boolean
-  size: long
-}
-
-export interface XpackUsageSecurityRolesNative {
-  dls: boolean
-  fls: boolean
-  size: long
-}
-
-export interface XpackUsageSlm extends XpackUsageBase {
-  policy_count?: integer
-  policy_stats?: SlmStatistics
-}
-
-export interface XpackUsageSql extends XpackUsageBase {
-  features: Record<string, integer>
-  queries: Record<string, XpackUsageQuery>
-}
-
-export interface XpackUsageSsl {
-  http: XpackUsageFeatureToggle
-  transport: XpackUsageFeatureToggle
-}
-
-export interface XpackUsageVector extends XpackUsageBase {
-  dense_vector_dims_avg_count: integer
-  dense_vector_fields_count: integer
-  sparse_vector_fields_count?: integer
-}
-
-export interface XpackUsageWatcher extends XpackUsageBase {
-  execution: XpackUsageWatcherActions
-  watch: XpackUsageWatcherWatch
-  count: XpackUsageCounter
-}
-
-export interface XpackUsageWatcherActionTotals {
-  total: Duration
-  total_time_in_ms: DurationValue<UnitMillis>
-}
-
-export interface XpackUsageWatcherActions {
-  actions: Record<Name, XpackUsageWatcherActionTotals>
-}
-
-export interface XpackUsageWatcherWatch {
-  input: Record<Name, XpackUsageCounter>
-  condition?: Record<Name, XpackUsageCounter>
-  action?: Record<Name, XpackUsageCounter>
-  trigger: XpackUsageWatcherWatchTrigger
-}
-
-export interface XpackUsageWatcherWatchTrigger {
-  schedule?: XpackUsageWatcherWatchTriggerSchedule
-  _all: XpackUsageCounter
-}
-
-export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter {
-  cron: XpackUsageCounter
-  _all: XpackUsageCounter
-}
-
-export interface SpecUtilsAdditionalProperties {
-}
-
-export interface SpecUtilsAdditionalProperty {
-}
-
-export interface SpecUtilsCommonQueryParameters {
-  error_trace?: boolean
-  filter_path?: string | string[]
-  human?: boolean
-  pretty?: boolean
-}
-
-export interface SpecUtilsOverloadOf {
-}
-
-export interface SpecUtilsCommonCatQueryParameters {
-  format?: string
-  help?: boolean
-  v?: boolean
-}