diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 5b4e31664..acb918234 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -565,8 +565,8 @@ async def bulk( """ .. raw:: html -
Bulk index or delete documents.
- Perform multiple index, create, delete, and update actions in a single request.
+
Bulk index or delete documents.
+Perform multiple index, create, delete, and update actions in a single request.
This reduces overhead and can greatly increase indexing speed.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
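A minimal usage sketch (illustrative only, not part of the diff; `es` is assumed to be an existing `AsyncElasticsearch` client and `my-index` is a hypothetical index):

```python
# Hypothetical sketch: one bulk request mixing index and delete actions.
resp = await es.bulk(
    operations=[
        {"index": {"_index": "my-index", "_id": "1"}},
        {"title": "Hello"},  # source document for the preceding index action
        {"delete": {"_index": "my-index", "_id": "2"}},
    ]
)
print(resp["errors"])  # False if every action succeeded
```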
Checks if the specified combination of method, API, parameters, and arbitrary capabilities is supported.
+ + + `Clear a scrolling search. - Clear the search context and results for a scrolling search.
+Clear a scrolling search.
+Clear the search context and results for a scrolling search.
`Close a point in time. - A point in time must be opened explicitly before being used in search requests. +
Close a point in time.
+A point in time must be opened explicitly before being used in search requests.
The keep_alive parameter tells Elasticsearch how long it should persist.
A point in time is automatically closed when the keep_alive period has elapsed.
However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
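As a sketch of the open/search/close cycle described above (assuming an existing `AsyncElasticsearch` client `es` and a hypothetical index `my-index`):

```python
# Hypothetical sketch: open a point in time, search it, then close it promptly.
pit = await es.open_point_in_time(index="my-index", keep_alive="1m")
try:
    resp = await es.search(pit={"id": pit["id"], "keep_alive": "1m"}, size=10)
finally:
    await es.close_point_in_time(id=pit["id"])  # don't wait for keep_alive to lapse
```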
Count search results. - Get the number of documents matching a query.
+Count search results.
+Get the number of documents matching a query.
The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.
The query is optional. When no query is provided, the API uses match_all to count all the documents.
The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.
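A usage sketch of both forms (hypothetical index names; `es` is an assumed `AsyncElasticsearch` client):

```python
# Hypothetical sketch: with no query the API counts all documents (match_all).
total = (await es.count(index="my-index"))["count"]
# Query DSL in the request body, across multiple targets via a wildcard.
errors = (await es.count(index="logs-*", query={"match": {"level": "error"}}))["count"]
```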
@@ -1643,11 +1710,11 @@ async def delete_by_query_rethrottle( self, *, task_id: str, + requests_per_second: float, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -1665,9 +1732,13 @@ async def delete_by_query_rethrottle( """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") + if requests_per_second is None: + raise ValueError("Empty value passed for parameter 'requests_per_second'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_delete_by_query/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} + if requests_per_second is not None: + __query["requests_per_second"] = requests_per_second if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -1676,8 +1747,6 @@ async def delete_by_query_rethrottle( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - if requests_per_second is not None: - __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -1703,8 +1772,8 @@ async def delete_script( """ .. raw:: html -Delete a script or search template. - Deletes a stored script or search template.
+Delete a script or search template.
+Deletes a stored script or search template.
`Explain a document match result. - Get information about why a specific document matches, or doesn't match, a query. +
Explain a document match result.
+Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document.
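For illustration, a minimal sketch of requesting a score explanation for one document (assumed `es` client, hypothetical index and document ID):

```python
# Hypothetical sketch: ask why document "1" does or doesn't match a query.
resp = await es.explain(
    index="my-index",
    id="1",
    query={"match": {"title": "hello"}},
)
print(resp["matched"], resp["explanation"]["value"])  # match flag and score detail
```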
@@ -2419,8 +2488,8 @@ async def get_script( """ .. raw:: html -Get a script or search template. - Retrieves a stored script or search template.
+Get a script or search template.
+Retrieves a stored script or search template.
`Get the cluster health. - Get a report with the health status of an Elasticsearch cluster. +
Get the cluster health.
+Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality.
Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status.
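A sketch of reading the indicator statuses described above (assuming an `AsyncElasticsearch` client `es`):

```python
# Hypothetical sketch: fetch the report and list any non-green indicators.
report = await es.health_report()
for name, indicator in report["indicators"].items():
    if indicator["status"] != "green":
        print(name, indicator["status"])
```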
@@ -2969,8 +3038,8 @@ async def info( """ .. raw:: html -Get cluster info. - Get basic build, version, and cluster information. +
Get cluster info.
+Get basic build, version, and cluster information. NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.
@@ -3664,8 +3733,8 @@ async def put_script( """ .. raw:: html -Create or update a script or search template. - Creates or updates a stored script or search template.
+Create or update a script or search template.
+Creates or updates a stored script or search template.
`Update documents. - Updates documents that match the specified query. +
Update documents.
+Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
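A minimal sketch of an update-by-query call (illustrative; `es`, the index name, and the field names are hypothetical):

```python
# Hypothetical sketch: increment a counter on every matching document.
resp = await es.update_by_query(
    index="my-index",
    query={"term": {"user.id": "kimchy"}},
    script={"source": "ctx._source.count++", "lang": "painless"},
    conflicts="proceed",  # skip version conflicts instead of aborting
)
print(resp["updated"])
```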
This API is a diagnostics API and the output should not be relied upon for building applications.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_balance" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.delete_desired_balance", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def delete_desired_nodes( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Designed for indirect use by ECE/ESS and ECK, direct use is not supported.
+ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_nodes" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.delete_desired_nodes", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def get_desired_balance( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +This API is a diagnostics API and the output should not be relied upon for building applications.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_balance" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.get_desired_balance", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def get_desired_nodes( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Gets the latest desired nodes.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_nodes/_latest" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.get_desired_nodes", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def prevalidate_node_removal( + self, + *, + error_trace: t.Optional[bool] = None, + external_ids: t.Optional[t.Sequence[str]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + ids: t.Optional[t.Sequence[str]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + names: t.Optional[t.Sequence[str]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Prevalidates node removal from the cluster.
+ + + :param external_ids: A comma-separated list of node external IDs to prevalidate + :param ids: A comma-separated list of node IDs to prevalidate + :param master_timeout: Period to wait for a connection to the master node. + :param names: A comma-separated list of node names to prevalidate + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/prevalidate_node_removal" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if external_ids is not None: + __query["external_ids"] = external_ids + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if ids is not None: + __query["ids"] = ids + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if names is not None: + __query["names"] = names + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.prevalidate_node_removal", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_name="body", + ignore_deprecated_options={"body"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def update_desired_nodes( + self, + *, + history_id: str, + version: int, + body: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + dry_run: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Designed for indirect use by ECE/ESS and ECK, direct use is not supported.
+ + + :param history_id: The history ID + :param version: The version number + :param body: + :param dry_run: Simulate the update + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if history_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'history_id'") + if version in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'version'") + if body is None and body is None: + raise ValueError( + "Empty value passed for parameters 'body' and 'body', one of them should be set." + ) + elif body is not None and body is not None: + raise ValueError("Cannot set both 'body' and 'body'") + __path_parts: t.Dict[str, str] = { + "history_id": _quote(history_id), + "version": _quote(version), + } + __path = f'/_internal/desired_nodes/{__path_parts["history_id"]}/{__path_parts["version"]}' + __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = body if body is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="_internal.update_desired_nodes", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index fc121e9ca..e7971565d 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -3301,10 +3301,20 @@ async def segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, + allow_closed: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ @@ -3355,6 +3365,8 @@ async def segments( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -3378,6 +3390,14 @@ async def segments( :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+ :param allow_closed: If true, allow closed indices to be returned in the response; + if false, keep the legacy behaviour of throwing an exception if the index + pattern matches closed indices. + :param allow_no_indices: If false, the request returns an error if any wildcard + expression, index alias, or _all value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting foo*,bar* returns an error if an index starts + with foo but no index starts with bar. :param bytes: Sets the units for columns that contain a byte-size value. Note that byte-size value units work in terms of powers of 1024. For instance `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are @@ -3386,12 +3406,20 @@ async def segments( least `1.0`. If given, byte-size values are rendered as an integer with no suffix, representing the value of the column in the chosen unit. Values that are not an exact multiple of the chosen unit are rounded down. + :param expand_wildcards: Type of index that wildcard expressions can match. If + the request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values, such + as open,hidden. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored + when frozen. + :param ignore_unavailable: If true, missing or closed indices are not included + in the response. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating @@ -3416,10 +3444,16 @@ async def segments( __path_parts = {} __path = "/_cat/segments" __query: t.Dict[str, t.Any] = {} + if allow_closed is not None: + __query["allow_closed"] = allow_closed + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: @@ -3430,6 +3464,10 @@ async def segments( __query["help"] = help if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index a98428ffb..a7f3ada32 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -125,8 +125,8 @@ async def follow( """ .. raw:: html -Create a follower. - Create a cross-cluster replication follower index that follows a specific leader index. +
Create a follower.
+Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
@@ -368,8 +368,8 @@ async def forget_follower( """ .. raw:: html -Forget a follower. - Remove the cross-cluster replication follower retention leases from the leader.
+Forget a follower.
+Remove the cross-cluster replication follower retention leases from the leader.
A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. @@ -640,8 +640,8 @@ async def put_auto_follow_pattern( """ .. raw:: html -
Create or update auto-follow patterns. - Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +
Create or update auto-follow patterns.
+Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.
This API can also be used to update auto-follow patterns. @@ -853,8 +853,8 @@ async def resume_follow( """ .. raw:: html -
Resume a follower. - Resume a cross-cluster replication follower index that was paused. +
Resume a follower.
+Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index.
diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index ced198b56..15517b4c9 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -47,8 +47,8 @@ async def allocation_explain( """ .. raw:: html -Explain the shard allocations. - Get explanations for shard allocations in the cluster. +
Explain the shard allocations.
+Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. @@ -127,8 +127,8 @@ async def delete_component_template( """ .. raw:: html -
Delete component templates. - Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+Delete component templates.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
`Clear cluster voting config exclusions. - Remove master-eligible nodes from the voting configuration exclusion list.
+Clear cluster voting config exclusions.
+Remove master-eligible nodes from the voting configuration exclusion list.
`Check component templates. - Returns information about whether a particular component template exists.
+Check component templates.
+Returns information about whether a particular component template exists.
`Get component templates. - Get information about component templates.
+Get component templates.
+Get information about component templates.
`Get cluster info. - Returns basic information about the cluster.
+Get cluster info.
+Returns basic information about the cluster.
`Get the pending cluster tasks. - Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+Get the pending cluster tasks.
+Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
@@ -674,8 +674,8 @@ async def post_voting_config_exclusions( """ .. raw:: html -Update voting configuration exclusions. - Update the cluster voting config exclusions by node IDs or node names. +
Update voting configuration exclusions.
+Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. @@ -757,8 +757,8 @@ async def put_component_template( """ .. raw:: html -
Create or update a component template. - Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+Create or update a component template.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
An index template can be composed of multiple component templates.
To use a component template, specify it in an index template’s composed_of list.
Component templates are only applied to new data streams and indices as part of a matching index template.
Reroute the cluster. - Manually change the allocation of individual shards in the cluster. +
Reroute the cluster.
+Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.
It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state.
For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.
Get the cluster state. - Get comprehensive information about the state of the cluster.
+Get the cluster state.
+Get comprehensive information about the state of the cluster.
The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.
The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. @@ -1124,7 +1124,8 @@ async def state( when unavailable (missing or closed) :param local: Return local information, do not retrieve the state from master node (default: false) - :param master_timeout: Specify timeout for connection to master + :param master_timeout: Timeout for waiting for new cluster state in case it is + blocked :param wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version @@ -1193,8 +1194,8 @@ async def stats( """ .. raw:: html -
Get cluster statistics. - Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
+Get cluster statistics.
+Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
`Deletes a connector secret.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.secret_delete", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def secret_get( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Retrieves a secret stored by Connectors.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.secret_get", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("value",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def secret_post( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + value: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Creates a secret for a Connector.
+ + + :param value: + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_connector/_secret" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.secret_post", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("value",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def secret_put( + self, + *, + id: str, + value: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Creates or updates a secret for a Connector.
+ + + :param id: The ID of the secret + :param value: + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if value is None and body is None: + raise ValueError("Empty value passed for parameter 'value'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.secret_put", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_cancel( @@ -643,8 +829,8 @@ async def sync_job_check_in( """ .. raw:: html -Check in a connector sync job.
- Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.
Check in a connector sync job.
+Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
@@ -700,8 +886,8 @@ async def sync_job_claim( """ .. raw:: html -Claim a connector sync job.
- This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time.
+
Claim a connector sync job.
+This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time.
Additionally, it can set the sync_cursor property for the sync job.
This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.
@@ -819,8 +1005,8 @@ async def sync_job_error( """ .. raw:: html -Set a connector sync job error.
- Set the error field for a connector sync job and set its status to error.
Set a connector sync job error.
+Set the error field for a connector sync job and set its status to error.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
@@ -1086,8 +1272,8 @@ async def sync_job_update_stats( """ .. raw:: html -Set the connector sync job stats.
- Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count.
+
Set the connector sync job stats.
+Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count.
You can also update last_seen.
This API is mainly used by the connector service for updating sync job information.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. @@ -1402,8 +1588,8 @@ async def update_features( """ .. raw:: html -
Update the connector features. - Update the connector features in the connector document. +
Update the connector features.
+Update the connector features in the connector document. This API can be used to control the following aspects of a connector:
Delete a dangling index. - If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +
Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.
Delete an enrich policy. - Deletes an existing enrich policy and its enrich index.
+Delete an enrich policy.
+Deletes an existing enrich policy and its enrich index.
`Run an enrich policy. - Create the enrich index for an existing enrich policy.
+Run an enrich policy.
+Create the enrich index for an existing enrich policy.
`Get an enrich policy. - Returns information about an enrich policy.
+Get an enrich policy.
+Returns information about an enrich policy.
`Create an enrich policy. - Creates an enrich policy.
+Create an enrich policy.
+Creates an enrich policy.
`Get enrich stats. - Returns enrich coordinator statistics and information about enrich policies that are currently executing.
+Get enrich stats.
+Returns enrich coordinator statistics and information about enrich policies that are currently executing.
`Delete an async EQL search. - Delete an async EQL search or a stored synchronous EQL search. +
Delete an async EQL search.
+Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.
@@ -89,8 +89,8 @@ async def get( """ .. raw:: html -Get async EQL search results. - Get the current status and available results for an async EQL search or a stored synchronous EQL search.
+Get async EQL search results.
+Get the current status and available results for an async EQL search or a stored synchronous EQL search.
`Get the async EQL status. - Get the current status for an async EQL search or a stored synchronous EQL search without returning results.
+Get the async EQL status.
+Get the current status for an async EQL search or a stored synchronous EQL search without returning results.
`Get EQL search results. - Returns search results for an Event Query Language (EQL) query. +
Get EQL search results.
+Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.
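For illustration, a minimal EQL sketch (the data stream name and process filter are hypothetical):

```python
# Hypothetical sketch: EQL over a data stream whose documents are events.
resp = await es.eql.search(
    index="my-data-stream",
    query='process where process.name == "regsvr32.exe"',
)
print(len(resp["hits"]["events"]))
```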
@@ -291,7 +291,7 @@ async def search( Defaults to 10 :param tiebreaker_field: Field used to sort hits with the same timestamp in ascending order - :param timestamp_field: Field containing event timestamp. Default "@timestamp" + :param timestamp_field: Field containing event timestamp. :param wait_for_completion_timeout: """ if index in SKIP_IN_PATH: diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 38e642779..28b10716d 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -88,8 +88,8 @@ async def async_query( """ .. raw:: html -Run an async ES|QL query. - Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
+Run an async ES|QL query.
+Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.
@@ -218,8 +218,8 @@ async def async_query_delete( """ .. raw:: html -Delete an async ES|QL query. - If the query is still running, it is cancelled. +
Delete an async ES|QL query.
+If the query is still running, it is cancelled. Otherwise, the stored results are deleted.
If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:
Get async ES|QL query results. - Get the current status and available results or stored results for an ES|QL asynchronous query. +
Get async ES|QL query results.
+Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
@@ -409,8 +409,8 @@ async def get_query( """ .. raw:: html -Get a specific running ES|QL query information. - Returns an object extended information about a running ES|QL query.
+Get information about a specific running ES|QL query.
+Returns an object with extended information about a running ES|QL query.
`Get running ES|QL queries information. - Returns an object containing IDs and other information about the running ES|QL queries.
+Get information about running ES|QL queries.
+Returns an object containing IDs and other information about the running ES|QL queries.
`Run an ES|QL query. - Get search results for an ES|QL (Elasticsearch query language) query.
+Run an ES|QL query.
+Get search results for an ES|QL (Elasticsearch query language) query.
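A minimal synchronous ES|QL sketch (hypothetical index; `es` is an assumed `AsyncElasticsearch` client):

```python
# Hypothetical sketch: a synchronous ES|QL query returning columnar results.
resp = await es.esql.query(query="FROM my-index | STATS c = COUNT() | LIMIT 1")
print(resp["columns"], resp["values"])
```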
`Get the features.
- Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot.
+
Get the features.
+Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot.
You can use this API to determine which feature states to include when taking a snapshot.
By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.
A feature state includes one or more system indices necessary for a given feature to function. @@ -89,8 +89,8 @@ async def reset_features( """ .. raw:: html -
Reset the features. - Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
+Reset the features.
+Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
WARNING: Intended for development and testing use only. Do not reset features on a production cluster.
Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices.
diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index f1ea60007..12c2f4e4f 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -31,6 +31,90 @@ class FleetClient(NamespacedClient): + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def delete_secret( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Deletes a secret stored by Fleet.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_fleet/secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="fleet.delete_secret", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def get_secret( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Retrieves a secret stored by Fleet.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_fleet/secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="fleet.get_secret", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def global_checkpoints( self, @@ -138,8 +222,8 @@ async def msearch( """ .. raw:: html -Run multiple Fleet searches. - Run several Fleet searches with a single API request. +
Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
The API follows the same structure as the multi search API.
However, similar to the Fleet search API, it supports the wait_for_checkpoints parameter.
Creates a secret stored by Fleet.
+ + + :param value: + """ + if value is None and body is None: + raise ValueError("Empty value passed for parameter 'value'") + __path_parts: t.Dict[str, str] = {} + __path = "/_fleet/secret" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="fleet.post_secret", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "aggregations", @@ -388,8 +522,8 @@ async def search( """ .. raw:: html -Run a Fleet search. - The purpose of the Fleet search API is to provide an API where the search will be run only +
Run a Fleet search.
+The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index a8c35dbfe..aaf82c0b0 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -47,8 +47,8 @@ async def explore( """ .. raw:: html -Explore graph analytics. - Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. +
Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
Subsequent requests enable you to spider out from one more vertices of interest.
diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py
index c15aaa807..011fa95e9 100644
--- a/elasticsearch/_async/client/ilm.py
+++ b/elasticsearch/_async/client/ilm.py
@@ -40,8 +40,8 @@ async def delete_lifecycle(
"""
.. raw:: html
-
Delete a lifecycle policy. - You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
`Explain the lifecycle state. - Get the current lifecycle status for one or more indices. +
Explain the lifecycle state.
+Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
@@ -260,8 +260,8 @@ async def migrate_to_data_tiers( """ .. raw:: html -Migrate to data tiers routing. - Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +
Migrate to data tiers routing.
+Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers.
Migrating away from custom node attributes routing can be manually performed. @@ -341,8 +341,8 @@ async def move_to_step( """ .. raw:: html -
Move to a lifecycle step. - Manually move an index into a specific step in the lifecycle policy and run that step.
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.
WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.
You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index @@ -417,8 +417,8 @@ async def put_lifecycle( """ .. raw:: html -
Create or update a lifecycle policy. - If the specified policy exists, it is replaced and the policy version is incremented.
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
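A sketch of creating (or re-versioning) a policy (the policy name and phase settings are hypothetical):

```python
# Hypothetical sketch: a policy with a hot-phase rollover and a delete phase.
await es.ilm.put_lifecycle(
    name="my-policy",
    policy={
        "phases": {
            "hot": {"actions": {"rollover": {"max_primary_shard_size": "50gb"}}},
            "delete": {"min_age": "30d", "actions": {"delete": {}}},
        }
    },
)
```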
@@ -481,8 +481,8 @@ async def remove_policy( """ .. raw:: html -Remove policies from an index. - Remove the assigned lifecycle policies from an index or a data stream's backing indices. +
Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.
@@ -526,8 +526,8 @@ async def retry( """ .. raw:: html -Retry a policy. - Retry running the lifecycle policy for an index that is in the ERROR step. +
Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
@@ -574,8 +574,8 @@ async def start( """ .. raw:: html -Start the ILM plugin. - Start the index lifecycle management plugin if it is currently stopped. +
Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.
@@ -627,8 +627,8 @@ async def stop( """ .. raw:: html -Stop the ILM plugin. - Halt all lifecycle management operations and stop the index lifecycle management plugin. +
Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 13e217943..91cefebd8 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -165,8 +165,8 @@ async def analyze( """ .. raw:: html -Get tokens from text analysis. - The analyze API performs analysis on a text string and returns the resulting tokens.
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.
Generating excessive amount of tokens may cause a node to run out of memory.
The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced.
If more than this limit of tokens gets generated, an error occurs.
@@ -319,8 +319,8 @@ async def clear_cache(
"""
.. raw:: html
-
Clear the cache. - Clear the cache of one or more indices. +
Clear the cache.
+Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices.
By default, the clear cache API clears all caches.
To clear only specific caches, use the fielddata, query, or request parameters.
@@ -412,8 +412,8 @@ async def clone(
"""
.. raw:: html
-
Clone an index. - Clone an existing index into a new index. +
Clone an index.
+Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index.
IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. @@ -537,8 +537,8 @@ async def close( """ .. raw:: html -
Close an index. - A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +
Close an index.
+A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.
When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. @@ -634,8 +634,8 @@ async def create( """ .. raw:: html -
Create an index. - You can use the create index API to add a new index to an Elasticsearch cluster. +
Create an index.
+You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following:
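A minimal sketch covering the settings, mappings, and aliases mentioned above (all names hypothetical):

```python
# Hypothetical sketch: create an index with settings, mappings, and an alias.
await es.indices.create(
    index="my-index",
    settings={"number_of_shards": 1, "number_of_replicas": 1},
    mappings={"properties": {"created_at": {"type": "date"}}},
    aliases={"my-alias": {}},
)
```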
Delete indices. - Deleting an index deletes its documents, shards, and metadata. +
Delete indices.
+Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards.
You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. @@ -1008,8 +1008,8 @@ async def delete_alias( """ .. raw:: html -
Delete an alias. - Removes a data stream or index from an alias.
+Delete an alias.
+Removes a data stream or index from an alias.
`Delete data stream lifecycles. - Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
+Delete data stream lifecycles.
+Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
`Delete data streams. - Deletes one or more data streams and their backing indices.
+Delete data streams.
+Deletes one or more data streams and their backing indices.
`Delete data stream options. - Removes the data stream options from a data stream.
+Delete data stream options.
+Removes the data stream options from a data stream.
`Delete an index template. - The provided may contain multiple template names separated by a comma. If multiple template +
Delete an index template.
+The provided may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
@@ -1319,8 +1319,8 @@ async def delete_template( """ .. raw:: html -Delete a legacy index template. - IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+Delete a legacy index template.
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
`Analyze the index disk usage. - Analyze the disk usage of each field of an index or data stream. +
Analyze the index disk usage.
+Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
@@ -1469,8 +1469,8 @@ async def downsample(
"""
.. raw:: html
-
Downsample an index.
- Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
+
Downsample an index.
+Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
All documents within an hour interval are summarized and stored as a single document in the downsample index.
NOTE: Only indices in a time series data stream are supported. @@ -1546,8 +1546,8 @@ async def exists( """ .. raw:: html -
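A downsampling sketch matching the example above (hypothetical index names, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Roll 10-second samples up into one summary document per hour.
    # The source backing index must already be read-only.
    await es.indices.downsample(
        index="my-tsds-backing-index",
        target_index="my-tsds-downsampled",
        config={"fixed_interval": "1h"},
    )
    await es.close()

asyncio.run(main())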
Check indices. - Check if one or more indices, index aliases, or data streams exist.
+Check indices.
+Check if one or more indices, index aliases, or data streams exist.
`Check existence of index templates. - Get information about whether index templates exist. +
Check existence of index templates.
+Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
@@ -1826,8 +1826,8 @@ async def explain_data_lifecycle( """ .. raw:: html -Get the status for a data stream lifecycle. - Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
+Get the status for a data stream lifecycle.
+Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
`Get field usage stats. - Get field usage information for each shard and field of an index. +
Get field usage stats.
+Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.
The response body reports the per-shard usage count of the data structures that back the fields in the index. @@ -1971,8 +1971,8 @@ async def flush( """ .. raw:: html -
Flush data streams or indices. - Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +
Flush data streams or indices.
+Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.
After each operation has been flushed it is permanently stored in the Lucene index. @@ -2065,8 +2065,8 @@ async def forcemerge( """ .. raw:: html -
Force a merge. - Perform the force merge operation on the shards of one or more indices. +
Force a merge.
+Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices.
Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.
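Triggering such a manual merge from this client looks roughly like this (hypothetical index name, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Merge each shard of an index that no longer receives writes
    # down to a single segment.
    await es.indices.forcemerge(index="my-old-index", max_num_segments=1)
    await es.close()

asyncio.run(main())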
@@ -2200,8 +2200,8 @@ async def get( """ .. raw:: html -Get index information. - Get information about one or more indices. For data streams, the API returns information about the +
Get index information.
+Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
@@ -2294,8 +2294,8 @@ async def get_alias( """ .. raw:: html -Get aliases. - Retrieves information for one or more data stream or index aliases.
+Get aliases.
+Retrieves information for one or more data stream or index aliases.
`Get data stream lifecycle stats. - Get statistics about the data streams that are managed by a data stream lifecycle.
+Get data stream lifecycle stats.
+Get statistics about the data streams that are managed by a data stream lifecycle.
`Get mapping definitions. - Retrieves mapping definitions for one or more fields. +
Get mapping definitions.
+Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices.
This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
@@ -2761,14 +2761,14 @@ async def get_index_template( """ .. raw:: html -Get index templates. - Get information about one or more index templates.
+Get index templates.
+Get information about one or more index templates.
`Get mapping definitions. - For data streams, the API retrieves mappings for the stream’s backing indices.
+Get mapping definitions.
+For data streams, the API retrieves mappings for the stream’s backing indices.
`Get index settings. - Get setting information for one or more indices. +
Get index settings.
+Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices.
@@ -3063,8 +3063,8 @@ async def get_template( """ .. raw:: html -Get legacy index templates. - Get information about one or more index templates.
+Get legacy index templates.
+Get information about one or more index templates.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
@@ -3183,8 +3183,8 @@ async def migrate_to_data_stream( """ .. raw:: html -Convert an index alias to a data stream. - Converts an index alias to a data stream. +
Convert an index alias to a data stream.
+Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; @@ -3248,8 +3248,8 @@ async def modify_data_stream( """ .. raw:: html -
Update data streams. - Performs one or more data stream modification actions in a single atomic operation.
+Update data streams.
+Performs one or more data stream modification actions in a single atomic operation.
`Open a closed index. - For data streams, the API opens any closed backing indices.
+Open a closed index.
+For data streams, the API opens any closed backing indices.
A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
@@ -3406,8 +3406,8 @@ async def promote_data_stream( """ .. raw:: html -Promote a data stream. - Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.
+Promote a data stream.
+Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.
With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. @@ -3479,8 +3479,8 @@ async def put_alias( """ .. raw:: html -
Create or update an alias. - Adds a data stream or index to an alias.
+Create or update an alias.
+Adds a data stream or index to an alias.
`Update data stream lifecycles. - Update the data stream lifecycle of the specified data streams.
+Update data stream lifecycles.
+Update the data stream lifecycle of the specified data streams.
`Update data stream options. - Update the data stream options of the specified data streams.
+Update data stream options.
+Update the data stream options of the specified data streams.
`Create or update an index template. - Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. @@ -4039,8 +4039,8 @@ async def put_mapping( """ .. raw:: html -
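A minimal composable-template sketch (hypothetical names, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Any new index whose name matches the pattern picks up these settings.
    await es.indices.put_index_template(
        name="logs-template",
        index_patterns=["logs-*"],
        priority=100,
        template={"settings": {"number_of_shards": 1}},
    )
    await es.close()

asyncio.run(main())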
Update field mappings. - Add new fields to an existing data stream or index. +
Update field mappings.
+Add new fields to an existing data stream or index. You can use the update mapping API to:
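The bullet list of uses renders from HTML here; the most common one, adding a new field, looks like this (a sketch with hypothetical names, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Add a new field to an existing index without reindexing.
    await es.indices.put_mapping(
        index="my-index",
        properties={"user_id": {"type": "keyword"}},
    )
    await es.close()

asyncio.run(main())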
Update index settings. - Changes dynamic index settings in real time. +
Update index settings.
+Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.
To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. @@ -4338,8 +4338,8 @@ async def put_template( """ .. raw:: html -
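Both operations, updating a dynamic setting and reverting it with a null value, in one sketch (hypothetical index name, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Change a dynamic setting in real time.
    await es.indices.put_settings(
        index="my-index", settings={"index": {"refresh_interval": "30s"}}
    )
    # Revert it to the default by sending null (None in Python).
    await es.indices.put_settings(
        index="my-index", settings={"index": {"refresh_interval": None}}
    )
    await es.close()

asyncio.run(main())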
Create or update a legacy index template. - Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +
Create or update a legacy index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
Composable templates always take precedence over legacy templates. @@ -4446,8 +4446,8 @@ async def recovery( """ .. raw:: html -
Get index recovery information. - Get information about ongoing and completed shard recoveries for one or more indices. +
Get index recovery information.
+Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices.
All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.
Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. @@ -4544,8 +4544,8 @@ async def refresh( """ .. raw:: html -
Refresh an index. - A refresh makes recent operations performed on one or more indices available for search. +
Refresh an index.
+A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices.
By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.
You can change this default interval with the index.refresh_interval setting.
Reload search analyzers. - Reload an index's search analyzers and their resources. +
Reload search analyzers.
+Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices.
IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.
You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer.
@@ -4943,8 +4943,8 @@ async def resolve_index(
"""
.. raw:: html
-
Resolve indices. - Resolve the names and/or index patterns for indices, aliases, and data streams. +
Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
@@ -5022,8 +5022,8 @@ async def rollover( """ .. raw:: html -Roll over to a new index. - TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
+Roll over to a new index.
+TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target.
Roll over a data stream
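A conditional rollover sketch (hypothetical target name, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Roll the write target when either condition is met; with no
    # conditions at all, the rollover happens unconditionally.
    await es.indices.rollover(
        alias="my-data-stream",
        conditions={"max_age": "7d", "max_docs": 10_000_000},
    )
    await es.close()

asyncio.run(main())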
@@ -5159,8 +5159,8 @@ async def segments( """ .. raw:: html -Get index segments. - Get low-level information about the Lucene segments in index shards. +
Get index segments.
+Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
@@ -5240,8 +5240,8 @@ async def shard_stores( """ .. raw:: html -Get index shard stores. - Get store information about replica shards in one or more indices. +
Get index shard stores.
+Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices.
The index shard stores API returns the following information:
Shrink an index. - Shrink an index into a new index with fewer primary shards.
+Shrink an index.
+Shrink an index into a new index with fewer primary shards.
Before you can shrink an index:
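The prerequisite list renders from HTML here; roughly, writes must be blocked and a copy of every shard must sit on one node. A sketch of the write-block plus shrink (hypothetical names, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # Block writes first; shrinking requires a read-only source index.
    await es.indices.add_block(index="my-index", block="write")
    await es.indices.shrink(
        index="my-index",
        target="my-shrunken-index",
        settings={"index.number_of_shards": 1},
    )
    await es.close()

asyncio.run(main())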
Simulate an index. - Get the index configuration that would be applied to the specified index from an existing index template.
+Simulate an index.
+Get the index configuration that would be applied to the specified index from an existing index template.
`Simulate an index template. - Get the index configuration that would be applied by a particular index template.
+Simulate an index template.
+Get the index configuration that would be applied by a particular index template.
`Split an index. - Split an index into a new index with more primary shards.
+Split an index.
+Split an index into a new index with more primary shards.
Before you can split an index:
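Again the prerequisite list renders from HTML here; the source must be read-only and the target shard count a multiple of the source's. A sketch (hypothetical names, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    await es.indices.add_block(index="my-index", block="write")
    # 6 is a multiple of the source's shard count (for example, 2 or 3).
    await es.indices.split(
        index="my-index",
        target="my-split-index",
        settings={"index.number_of_shards": 6},
    )
    await es.close()

asyncio.run(main())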
@@ -5803,8 +5803,8 @@ async def stats( """ .. raw:: html -Get index statistics. - For data streams, the API retrieves statistics for the stream's backing indices.
+Get index statistics.
+For data streams, the API retrieves statistics for the stream's backing indices.
By default, the returned statistics are index-level with primaries and total aggregations.
primaries are the values for only the primary shards.
total are the accumulated values for both primary and replica shards.
Create or update an alias. - Adds a data stream or index to an alias.
+Create or update an alias.
+Adds a data stream or index to an alias.
`Validate a query. - Validates a query without running it.
+Validate a query.
+Validates a query without running it.
`Perform completion inference on the service
+Perform completion inference on the service.
`Delete an inference endpoint
+Delete an inference endpoint.
`Get an inference endpoint
+Get an inference endpoint.
`Perform reranking inference on the service
+Perform reranking inference on the service.
`Perform sparse embedding inference on the service
+Perform sparse embedding inference on the service.
`Perform text embedding inference on the service
+Perform text embedding inference on the service.
`Delete pipelines. - Delete one or more ingest pipelines.
+Delete pipelines.
+Delete one or more ingest pipelines.
`Get GeoIP statistics. - Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
+Get GeoIP statistics.
+Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
`Run a grok processor. - Extract structured fields out of a single text field within a document. +
Run a grok processor.
+Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
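One way to exercise a grok pattern from this client is the ingest simulate API; a sketch with a hypothetical pattern and sample document (not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    resp = await es.ingest.simulate(
        pipeline={
            "processors": [
                {
                    "grok": {
                        "field": "message",
                        "patterns": ["%{IP:client} %{WORD:method} %{URIPATHPARAM:request}"],
                    }
                }
            ]
        },
        docs=[{"_source": {"message": "55.3.244.1 GET /index.html"}}],
    )
    # The simulated document now carries client, method, and request fields.
    print(resp["docs"][0]["doc"]["_source"])
    await es.close()

asyncio.run(main())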
@@ -609,8 +609,8 @@ async def put_pipeline( """ .. raw:: html -Create or update a pipeline. - Changes made using this API take effect immediately.
+Create or update a pipeline.
+Changes made using this API take effect immediately.
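A minimal pipeline sketch (hypothetical pipeline ID, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    await es.ingest.put_pipeline(
        id="add-env",
        description="Tag incoming documents with an environment field",
        processors=[{"set": {"field": "env", "value": "production"}}],
    )
    await es.close()

asyncio.run(main())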
`Start a trial. - Start a 30-day trial, which gives access to all subscription features.
+Start a trial.
+Start a 30-day trial, which gives access to all subscription features.
NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.
To check the status of your trial, use the get trial status API.
diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index c724911dc..bc6639925 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -38,8 +38,8 @@ async def delete_pipeline( """ .. raw:: html -Delete a Logstash pipeline. - Delete a pipeline that is used for Logstash Central Management. +
Delete a Logstash pipeline.
+Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code.
@@ -83,8 +83,8 @@ async def get_pipeline( """ .. raw:: html -Get Logstash pipelines. - Get pipelines that are used for Logstash Central Management.
+Get Logstash pipelines.
+Get pipelines that are used for Logstash Central Management.
`Get deprecation information. - Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
@@ -87,8 +87,8 @@ async def get_feature_upgrade_status( """ .. raw:: html -Get feature migration information. - Version upgrades sometimes require changes to how features store configuration information and data in system indices. +
Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.
TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
@@ -129,8 +129,8 @@ async def post_feature_upgrade( """ .. raw:: html -Start the feature migration. - Version upgrades sometimes require changes to how features store configuration information and data in system indices. +
Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process.
Some functionality might be temporarily unavailable during the migration process.
TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index 6876e45bd..b5298312f 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -1101,8 +1101,8 @@ async def flush_job( """ .. raw:: html -Force buffered data to be processed. - The flush jobs API is only applicable when sending data for analysis using +
Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send @@ -1269,8 +1269,8 @@ async def get_buckets( """ .. raw:: html -
Get anomaly detection job results for buckets. - The API presents a chronological view of the records, grouped by bucket.
+Get anomaly detection job results for buckets.
+The API presents a chronological view of the records, grouped by bucket.
`Get data frame analytics job configuration info. - You can get information for multiple data frame analytics jobs in a single +
Get data frame analytics job configuration info.
+You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
@@ -1744,8 +1744,8 @@ async def get_datafeed_stats( """ .. raw:: html -Get datafeed stats. - You can get statistics for multiple datafeeds in a single API request by +
Get datafeed stats.
+You can get statistics for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get statistics for all datafeeds by using _all, by specifying * as the
<feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the
@@ -1809,8 +1809,8 @@ async def get_datafeeds(
"""
.. raw:: html
-
Get datafeeds configuration info. - You can get information for multiple datafeeds in a single API request by +
Get datafeeds configuration info.
+You can get information for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get information for all datafeeds by using _all, by specifying * as the
<feed_id>, or by omitting the <feed_id>.
@@ -1880,8 +1880,8 @@ async def get_filters(
"""
.. raw:: html
-
Get filters. - You can get a single filter or all filters.
+Get filters.
+You can get a single filter or all filters.
`Get anomaly detection job results for influencers. - Influencers are the entities that have contributed to, or are to blame for, +
Get anomaly detection job results for influencers.
+Influencers are the entities that have contributed to, or are to blame for,
the anomalies. Influencer results are available only if an
influencer_field_name is specified in the job configuration.
Get anomaly detection jobs configuration info. - You can get information for multiple anomaly detection jobs in a single API +
Get anomaly detection jobs configuration info.
+You can get information for multiple anomaly detection jobs in a single API
request by using a group name, a comma-separated list of jobs, or a wildcard
expression. You can get information for all anomaly detection jobs by using
_all, by specifying * as the <job_id>, or by omitting the <job_id>.
Get machine learning memory usage info. - Get information about how machine learning jobs and trained models are using memory, +
Get machine learning memory usage info.
+Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
@@ -2515,8 +2515,8 @@ async def get_records( """ .. raw:: html -Get anomaly records for an anomaly detection job. - Records contain the detailed analytical results. They describe the anomalous +
Get anomaly records for an anomaly detection job.
+Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size @@ -2708,8 +2708,8 @@ async def get_trained_models_stats( """ .. raw:: html -
Get trained models usage info. - You can get usage information for multiple trained +
Get trained models usage info.
+You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
@@ -2836,8 +2836,8 @@ async def info( """ .. raw:: html -Get machine learning information. - Get defaults and limits used by machine learning. +
Get machine learning information.
+Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be @@ -3076,8 +3076,8 @@ async def preview_data_frame_analytics( """ .. raw:: html -
Preview features used by data frame analytics. - Preview the extracted features used by a data frame analytics config.
+Preview features used by data frame analytics.
+Preview the extracted features used by a data frame analytics config.
`Preview a datafeed. - This API returns the first "page" of search results from a datafeed. +
Preview a datafeed.
+This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. @@ -3364,8 +3364,8 @@ async def put_data_frame_analytics( """ .. raw:: html -
Create a data frame analytics job. - This API creates a data frame analytics job that performs an analysis on the +
Create a data frame analytics job.
+This API creates a data frame analytics job that performs an analysis on the
source indices and stores the outcome in a destination index.
By default, the query used in the source configuration is {"match_all": {}}.
If the destination index does not exist, it is created automatically when you start the job.
@@ -3545,8 +3545,8 @@ async def put_datafeed( """ .. raw:: html -Create a datafeed. - Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. +
Create a datafeed.
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
You can associate only one datafeed with each anomaly detection job.
The datafeed contains a query that runs at a defined interval (frequency).
If you are concerned about delayed data, you can add a delay (query_delay) at each interval. By default, the datafeed uses the following query: {"match_all": {"boost": 1}}.
Create a filter. - A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +
Create a filter.
+A filter contains a list of strings. It can be used by one or more anomaly detection jobs.
Specifically, filters are referenced in the custom_rules property of detector configuration objects.
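A sketch of creating such a filter (hypothetical IDs and items, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # A filter that custom_rules in a detector can reference by ID.
    await es.ml.put_filter(
        filter_id="safe_domains",
        description="Domains known to be safe",
        items=["*.example.com", "example.org"],
    )
    await es.close()

asyncio.run(main())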
Create a trained model. - Enable you to supply a trained model that is not created by data frame analytics.
+Create a trained model.
+Enables you to supply a trained model that is not created by data frame analytics.
`Create or update a trained model alias. - A trained model alias is a logical name used to reference a single trained +
Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference @@ -4282,8 +4282,8 @@ async def put_trained_model_vocabulary( """ .. raw:: html -
Create a trained model vocabulary. - This API is supported only for natural language processing (NLP) models. +
Create a trained model vocabulary.
+This API is supported only for natural language processing (NLP) models.
The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.
Reset an anomaly detection job. - All model state and results are deleted. The job is ready to start over as if +
Reset an anomaly detection job.
+All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
@@ -4404,8 +4404,8 @@ async def revert_model_snapshot( """ .. raw:: html -Revert to a snapshot. - The machine learning features react quickly to anomalous input, learning new +
Revert to a snapshot.
+The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a @@ -4474,8 +4474,8 @@ async def set_upgrade_mode( """ .. raw:: html -
Set upgrade_mode for ML indices. - Sets a cluster wide upgrade_mode setting that prepares machine learning +
Set upgrade_mode for ML indices.
+Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, @@ -4535,8 +4535,8 @@ async def start_data_frame_analytics( """ .. raw:: html -
Start a data frame analytics job. - A data frame analytics job can be started and stopped multiple times +
Start a data frame analytics job.
+A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The @@ -4685,8 +4685,8 @@ async def start_trained_model_deployment( """ .. raw:: html -
Start a trained model deployment. - It allocates the model to every machine learning node.
+Start a trained model deployment.
+It allocates the model to every machine learning node.
`Stop data frame analytics jobs. - A data frame analytics job can be started and stopped multiple times +
Stop data frame analytics jobs.
+A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
@@ -4855,8 +4855,8 @@ async def stop_datafeed( """ .. raw:: html -Stop datafeeds. - A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +
Stop datafeeds.
+A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
@@ -5101,8 +5101,8 @@ async def update_datafeed( """ .. raw:: html -Update a datafeed. - You must stop and start the datafeed for the changes to be applied. +
Update a datafeed.
+You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
@@ -5265,8 +5265,8 @@ async def update_filter( """ .. raw:: html -Update a filter. - Updates the description of a filter, adds items, or removes items from the list.
+Update a filter.
+Updates the description of a filter, adds items, or removes items from the list.
`Update an anomaly detection job. - Updates certain properties of an anomaly detection job.
+Update an anomaly detection job.
+Updates certain properties of an anomaly detection job.
`Update a snapshot. - Updates certain properties of a snapshot.
+Update a snapshot.
+Updates certain properties of a snapshot.
`Upgrade a snapshot. - Upgrade an anomaly detection model snapshot to the latest major version. +
Upgrade a snapshot.
+Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index afc6406da..7876424f6 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -44,8 +44,8 @@ async def bulk( """ .. raw:: html -
Send monitoring data. - This API is used by the monitoring features to send monitoring data.
+Send monitoring data.
+This API is used by the monitoring features to send monitoring data.
`Clear the archived repositories metering. - Clear the archived repositories metering information in the cluster.
+Clear the archived repositories metering.
+Clear the archived repositories metering information in the cluster.
`Get cluster repositories metering. - Get repositories metering information for a cluster. +
Get cluster repositories metering.
+Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
@@ -157,8 +157,8 @@ async def hot_threads( """ .. raw:: html -Get the hot threads for nodes. - Get a breakdown of the hot threads on each selected node in the cluster. +
Get the hot threads for nodes.
+Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
@@ -378,8 +378,8 @@ async def stats( """ .. raw:: html -Get node statistics. - Get statistics for nodes in a cluster. +
Get node statistics.
+Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
diff --git a/elasticsearch/_async/client/profiling.py b/elasticsearch/_async/client/profiling.py
new file mode 100644
index 000000000..30e7ab52e
--- /dev/null
+++ b/elasticsearch/_async/client/profiling.py
@@ -0,0 +1,177 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import typing as t
+
+from elastic_transport import ObjectApiResponse
+
+from ._base import NamespacedClient
+from .utils import _rewrite_parameters
+
+
+class ProfilingClient(NamespacedClient):
+
+    @_rewrite_parameters(
+        body_name="conditions",
+    )
+    async def flamegraph(
+        self,
+        *,
+        conditions: t.Optional[t.Any] = None,
+        body: t.Optional[t.Any] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+ + + `Extracts raw stacktrace information from Universal Profiling.
+ + + `Returns basic information about the status of Universal Profiling.
+ + + `Delete a query rule. - Delete a query rule within a query ruleset. +
Delete a query rule.
+Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
@@ -92,8 +92,8 @@ async def delete_ruleset( """ .. raw:: html -Delete a query ruleset. - Remove a query ruleset and its associated data. +
Delete a query ruleset.
+Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.
@@ -138,8 +138,8 @@ async def get_rule( """ .. raw:: html -Get a query rule. - Get details about a query rule within a query ruleset.
+Get a query rule.
+Get details about a query rule within a query ruleset.
`Get a query ruleset. - Get details about a query ruleset.
+Get a query ruleset.
+Get details about a query ruleset.
`Get all query rulesets. - Get summarized information about the query rulesets.
+Get all query rulesets.
+Get summarized information about the query rulesets.
`Create or update a query rule. - Create or update a query rule within a query ruleset.
+Create or update a query rule.
+Create or update a query rule within a query ruleset.
IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. @@ -380,8 +380,8 @@ async def put_ruleset( """ .. raw:: html -
Create or update a query ruleset. - There is a limit of 100 rules per ruleset. +
Create or update a query ruleset.
+There is a limit of 100 rules per ruleset.
This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.
IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in a single rule.
It is advised to use one or the other in query rulesets, to avoid errors.
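A pinned-rule sketch respecting the ids-or-docs constraint described above (hypothetical IDs, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # One pinned rule; the actions use ids only, never ids and docs together.
    await es.query_rules.put_ruleset(
        ruleset_id="my-ruleset",
        rules=[
            {
                "rule_id": "promote-docs",
                "type": "pinned",
                "criteria": [
                    {"type": "exact", "metadata": "user_query", "values": ["pugs"]}
                ],
                "actions": {"ids": ["doc-1"]},
            }
        ],
    )
    await es.close()

asyncio.run(main())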
@@ -442,8 +442,8 @@ async def test(
"""
.. raw:: html
-
Test a query ruleset. - Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
+Test a query ruleset.
+Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
`Get rollup job information. - Get the configuration, stats, and status of rollup jobs.
+Get rollup job information.
+Get the configuration, stats, and status of rollup jobs.
NOTE: This API returns only active (both STARTED and STOPPED) jobs.
If a job was created, ran for a while, then was deleted, the API does not return any details about it.
For details about a historical rollup job, the rollup capabilities API may be more useful.
Get the rollup job capabilities. - Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
+Get the rollup job capabilities.
+Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine:
@@ -216,8 +216,8 @@ async def get_rollup_index_caps( """ .. raw:: html -Get the rollup index capabilities. - Get the rollup capabilities of all jobs inside of a rollup index. +
Get the rollup index capabilities.
+Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:
Search rolled-up data. - The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +
Search rolled-up data.
+The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.
The request body supports a subset of features from the regular search API. The following functionality is not available:
@@ -495,8 +495,8 @@ async def start_job( """ .. raw:: html -Start rollup jobs. - If you try to start a job that does not exist, an exception occurs. +
Start rollup jobs.
+If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens.
@@ -543,8 +543,8 @@ async def stop_job( """ .. raw:: html -Stop rollup jobs. - If you try to stop a job that does not exist, an exception occurs. +
Stop rollup jobs.
+If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens.
Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped.
This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:
Delete a behavioral analytics collection. - The associated data stream is also deleted.
+Delete a behavioral analytics collection.
+The associated data stream is also deleted.
`Get search applications. - Get information about search applications.
+Get search applications.
+Get information about search applications.
`Render a search application query. - Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +
Render a search application query.
+Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
If a parameter used in the search template is not specified in params, the parameter's default value will be used.
The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.
You must have read privileges on the backing alias of the search application.
Run a search application search. - Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. +
Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 9b6902fac..c0db57867 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -46,8 +46,8 @@ async def cache_stats( """ .. raw:: html -Get cache statistics. - Get statistics about the shared cache for partially mounted indices.
+Get cache statistics.
+Get statistics about the shared cache for partially mounted indices.
`Clear the cache. - Clear indices and data streams from the shared cache for partially mounted indices.
+Clear the cache.
+Clear indices and data streams from the shared cache for partially mounted indices.
`Mount a snapshot. - Mount a snapshot as a searchable snapshot index. +
Mount a snapshot.
+Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.
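A mount sketch (hypothetical repository, snapshot, and index names; not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    # storage="shared_cache" produces a partially mounted index;
    # "full_copy" is the default.
    await es.searchable_snapshots.mount(
        repository="my-repository",
        snapshot="my-snapshot",
        index="my-index",
        storage="shared_cache",
    )
    await es.close()

asyncio.run(main())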
diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 516906ce8..131f5e913 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -288,8 +288,8 @@ async def bulk_update_api_keys( """ .. raw:: html -Bulk update API keys. - Update the attributes for multiple API keys.
+Bulk update API keys.
+Update the attributes for multiple API keys.
IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.
This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.
It is not possible to update expired or invalidated API keys.
@@ -3711,7 +3711,8 @@ async def query_role( :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`. + :param sort: The sort definition. You can sort on `name`, `description`, `metadata`, + `applications.application`, `applications.privileges`, and `applications.resources`. In addition, sort can also be applied to the `_doc` field to sort by index order. """ diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 9502d1fe6..d11336696 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -40,8 +40,8 @@ async def delete_node( """ .. raw:: html -Cancel node shutdown preparations. - Remove a node from the shutdown list so it can resume normal operations. +
Cancel node shutdown preparations.
+Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.
NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index bb636ddb6..d4c7f5f0e 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -66,8 +66,8 @@ async def ingest( """ .. raw:: html -
Simulate data ingestion. - Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
+Simulate data ingestion.
+Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.
The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 3eaafd865..6221b52d1 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -40,8 +40,8 @@ async def delete_lifecycle( """ .. raw:: html -
Delete a policy. - Delete a snapshot lifecycle policy definition. +
Delete a policy.
+Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.
@@ -96,8 +96,8 @@ async def execute_lifecycle( """ .. raw:: html -Run a policy. - Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +
Run a policy.
+Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
@@ -151,8 +151,8 @@ async def execute_retention( """ .. raw:: html -Run a retention policy. - Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +
Run a retention policy.
+Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.
@@ -204,8 +204,8 @@ async def get_lifecycle( """ .. raw:: html -Get policy information. - Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
+Get policy information.
+Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
`Get snapshot lifecycle management statistics. - Get global and policy-level statistics about actions taken by snapshot lifecycle management.
+Get snapshot lifecycle management statistics.
+Get global and policy-level statistics about actions taken by snapshot lifecycle management.
`Create or update a policy. - Create or update a snapshot lifecycle policy. +
Create or update a policy.
+Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored.
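A policy sketch (hypothetical IDs and repository, not part of this patch); the schedule uses cron syntax:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    await es.slm.put_lifecycle(
        policy_id="nightly-snapshots",
        schedule="0 30 1 * * ?",  # 01:30 every night
        name="<nightly-snap-{now/d}>",
        repository="my-repository",
        config={"indices": ["data-*"]},
        retention={"expire_after": "30d"},
    )
    await es.close()

asyncio.run(main())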
@@ -460,8 +460,8 @@ async def start( """ .. raw:: html -Start snapshot lifecycle management. - Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +
Start snapshot lifecycle management.
+Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
@@ -514,8 +514,8 @@ async def stop( """ .. raw:: html -Stop snapshot lifecycle management. - Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +
Stop snapshot lifecycle management.
+Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 31a3dff57..41e92bb9f 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -46,8 +46,8 @@ async def cleanup_repository( """ .. raw:: html -Clean up the snapshot repository. - Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
+Clean up the snapshot repository.
+Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
`Clone a snapshot. - Clone part of all of a snapshot into another snapshot in the same repository.
+Clone a snapshot.
+Clone part of all of a snapshot into another snapshot in the same repository.
`Create a snapshot. - Take a snapshot of a cluster or of data streams and indices.
+Create a snapshot.
+Take a snapshot of a cluster or of data streams and indices.
`Create or update a snapshot repository. - IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. +
Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
To register a snapshot repository, the cluster's global metadata must be writeable.
Ensure there are no cluster blocks (for example, cluster.blocks.read_only and clsuter.blocks.read_only_allow_delete settings) that prevent write access.
Several options for this API can be specified using a query parameter or a request body parameter. @@ -470,8 +470,8 @@ async def delete_repository( """ .. raw:: html -
Delete snapshot repositories. - When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +
Delete snapshot repositories.
+When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place.
@@ -971,8 +971,8 @@ async def repository_verify_integrity( """ .. raw:: html -Verify the repository integrity. - Verify the integrity of the contents of a snapshot repository.
+Verify the repository integrity.
+Verify the integrity of the contents of a snapshot repository.
This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.
If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity.
Until you do so:
Restore a snapshot. - Restore a snapshot of a cluster or data streams and indices.
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible.
@@ -1259,8 +1259,8 @@ async def status( """ .. raw:: html -Get the snapshot status. - Get a detailed description of the current state for each shard participating in the snapshot.
+Get the snapshot status.
+Get a detailed description of the current state for each shard participating in the snapshot.
Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
If you omit the <snapshot> request path parameter, the request retrieves information only for currently running snapshots.
@@ -1337,8 +1337,8 @@ async def verify_repository(
"""
.. raw:: html
-
Verify a snapshot repository. - Check for common misconfigurations in a snapshot repository.
+Verify a snapshot repository.
+Check for common misconfigurations in a snapshot repository.
`Delete an async SQL search. - Delete an async SQL search or a stored synchronous SQL search. +
Delete an async SQL search.
+Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:
Get async SQL search results. - Get the current status and available results for an async SQL search or stored synchronous SQL search.
+Get async SQL search results.
+Get the current status and available results for an async SQL search or stored synchronous SQL search.
If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
@@ -208,8 +208,8 @@ async def get_async_status( """ .. raw:: html -Get the async SQL search status. - Get the current status of an async SQL search or a stored synchronous SQL search.
+Get the async SQL search status.
+Get the current status of an async SQL search or a stored synchronous SQL search.
`Get SQL search results. - Run an SQL request.
+Get SQL search results.
+Run an SQL request.
`Translate SQL into Elasticsearch queries. - Translate an SQL search into a search API request containing Query DSL. +
Translate SQL into Elasticsearch queries.
+Translate an SQL search into a search API request containing Query DSL.
It accepts the same request body parameters as the SQL search API, excluding cursor.
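A sketch running the same statement through both endpoints (hypothetical index name, not part of this patch):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    es = AsyncElasticsearch("http://localhost:9200")
    resp = await es.sql.query(query="SELECT author FROM library LIMIT 5")
    print(resp["rows"])
    # Inspect the Query DSL the same statement translates to.
    print(await es.sql.translate(query="SELECT author FROM library LIMIT 5"))
    await es.close()

asyncio.run(main())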
Delete a synonym rule. - Delete a synonym rule from a synonym set.
+Delete a synonym rule.
+Delete a synonym rule from a synonym set.
-<p>Get a synonym rule.
-Get a synonym rule from a synonym set.
+Get a synonym rule.
+Get a synonym rule from a synonym set.
-<p>Get all synonym sets.
-Get a summary of all defined synonym sets.
+Get all synonym sets.
+Get a summary of all defined synonym sets.
-<p>Create or update a synonym set.
-Synonyms sets are limited to a maximum of 10,000 synonym rules per set.
+<p>Create or update a synonym set.</p>
+Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets.
When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
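A minimal sketch of creating a synonym set (assumes ``client`` is an ``AsyncElasticsearch`` instance; set and rule IDs are hypothetical):

.. code-block:: python

    resp = await client.synonyms.put_synonym(
        id="my-synonyms-set",  # illustrative set ID
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi, howdy"},
            {"synonyms": "laptop => notebook"},  # explicit mapping rule, ID auto-assigned
        ],
    )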
@@ -378,8 +378,8 @@ async def put_synonym_rule(
        """
        .. raw:: html

-<p>Create or update a synonym rule.
-Create or update a synonym rule in a synonym set.
+Create or update a synonym rule.
+Create or update a synonym rule in a synonym set.
If any of the synonym rules included is invalid, the API returns an error.
When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
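Hedged single-rule update sketch (``client`` assumed as above; IDs are illustrative):

.. code-block:: python

    resp = await client.synonyms.put_synonym_rule(
        set_id="my-synonyms-set",   # existing synonym set (hypothetical)
        rule_id="rule-1",           # rule to create or overwrite
        synonyms="hello, hi, howdy",
    )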
diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py
index 96230cc4c..85dbc856c 100644
--- a/elasticsearch/_async/client/tasks.py
+++ b/elasticsearch/_async/client/tasks.py
@@ -121,8 +121,8 @@ async def get(
        """
        .. raw:: html

-<p>Get task information.
-Get information about a task currently running in the cluster.
+Get task information.
+Get information about a task currently running in the cluster.
WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
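Usage sketch (``client`` is an assumed ``AsyncElasticsearch`` instance; the ``node_id:task_number`` value is hypothetical):

.. code-block:: python

    resp = await client.tasks.get(
        task_id="oTUltX4IQMOUUVeiohTt8A:12345",  # illustrative task identifier
        wait_for_completion=False,
    )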
@@ -185,8 +185,8 @@ async def list(
        """
        .. raw:: html

-<p>Get all tasks.
-Get information about the tasks currently running on one or more nodes in the cluster.
+Get all tasks.
+Get information about the tasks currently running on one or more nodes in the cluster.
WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
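A minimal listing sketch is shown here (``client`` assumed as above; the action filter is an example):

.. code-block:: python

    resp = await client.tasks.list(
        detailed=True,        # include detailed per-task information
        actions="*search*",   # only tasks whose action matches this pattern
        group_by="parents",   # group child tasks under their parent task
    )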
Identifying running tasks
diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py
index 6307f20bb..98d3cbf82 100644
--- a/elasticsearch/_async/client/text_structure.py
+++ b/elasticsearch/_async/client/text_structure.py
@@ -55,8 +55,8 @@ async def find_field_structure(
        """
        .. raw:: html

-<p>Find the structure of a text field.
-Find the structure of a text field in an Elasticsearch index.
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.
This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.
For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.
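Hedged sketch of analyzing such a ``message`` field (``client`` assumed; index and field names are illustrative):

.. code-block:: python

    resp = await client.text_structure.find_field_structure(
        index="my-logs",   # illustrative index containing @timestamp and message
        field="message",   # the text field to analyze
    )
    print(resp.get("grok_pattern"))  # a suggested pattern for parsing the field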
The response from the API contains:
@@ -241,8 +241,8 @@ async def find_message_structure(
        """
        .. raw:: html

-<p>Find the structure of text messages.
-Find the structure of a list of text messages.
+<p>Find the structure of text messages.</p>
+Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
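Minimal sketch with pre-split messages (``client`` assumed; the log lines are invented examples):

.. code-block:: python

    resp = await client.text_structure.find_message_structure(
        messages=[
            "[2024-03-05T12:00:01] INFO service started",
            "[2024-03-05T12:00:02] WARN disk usage at 91%",
        ],
    )
    print(resp["format"], resp.get("grok_pattern"))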
@@ -402,7 +402,11 @@ async def find_structure(
        delimiter: t.Optional[str] = None,
        ecs_compatibility: t.Optional[str] = None,
        explain: t.Optional[bool] = None,
-        format: t.Optional[str] = None,
+        format: t.Optional[
+            t.Union[
+                str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"]
+            ]
+        ] = None,
        grok_pattern: t.Optional[str] = None,
        has_header_row: t.Optional[bool] = None,
        line_merge_size_limit: t.Optional[int] = None,
@@ -416,8 +420,8 @@ async def find_structure(
        """
        .. raw:: html

-<p>Find the structure of a text file.
-The text file must contain data that is suitable to be ingested into Elasticsearch.
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
It must, however, be text; binary text formats are not currently supported.
@@ -615,8 +619,8 @@ async def test_grok_pattern(
        """
        .. raw:: html

-<p>Test a Grok pattern.
-Test a Grok pattern on one or more lines of text.
+<p>Test a Grok pattern.</p>
+Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
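Sketch of exercising a pattern against sample lines (``client`` assumed; pattern and text are examples):

.. code-block:: python

    resp = await client.text_structure.test_grok_pattern(
        grok_pattern="Hello %{WORD:first_name} %{WORD:last_name}",
        text=["Hello John Doe", "this line does not match"],
    )
    for match in resp["matches"]:
        print(match["matched"], match.get("fields"))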
diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py
index 249fa35cb..0e0918512 100644
--- a/elasticsearch/_async/client/transform.py
+++ b/elasticsearch/_async/client/transform.py
@@ -104,8 +104,8 @@ async def get_transform(
        """
        .. raw:: html

-<p>Get transforms.
-Get configuration information for transforms.
+Get transforms.
+Get configuration information for transforms.
-<p>Preview a transform.
-Generates a preview of the results that you will get when you create a transform with the same configuration.
+Preview a transform.
+Generates a preview of the results that you will get when you create a transform with the same configuration.
It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
@@ -386,8 +386,8 @@ async def put_transform(
        """
        .. raw:: html

-<p>Create a transform.
-Creates a transform.
+Create a transform.
+Creates a transform.
A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity.
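A minimal pivot-transform sketch (``client`` assumed; index names, the entity field, and the aggregation are illustrative):

.. code-block:: python

    resp = await client.transform.put_transform(
        transform_id="customer-totals-transform",        # hypothetical ID
        source={"index": "orders"},                      # source index (assumed)
        dest={"index": "customer-totals"},               # entity-centric destination
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "order_total"}}},
        },
    )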
@@ -616,8 +616,8 @@ async def set_upgrade_mode(
        """
        .. raw:: html

-<p>Set upgrade_mode for transform indices.
-Sets a cluster wide upgrade_mode setting that prepares transform
+<p>Set upgrade_mode for transform indices.</p>
+Sets a cluster wide upgrade_mode setting that prepares transform
indices for an upgrade.
When upgrading your cluster, in some circumstances you must restart your
nodes and reindex your transform indices. In those circumstances,
@@ -749,8 +749,8 @@ async def stop_transform(
        """
        .. raw:: html

-<p>Stop transforms.
-Stops one or more transforms.
+Stop transforms.
+Stops one or more transforms.
-<p>Update a transform.
-Updates certain properties of a transform.
+Update a transform.
+Updates certain properties of a transform.
All updated properties except description do not take effect until after the transform starts the next checkpoint,
thus there is data consistency in each checkpoint. To use this API, you must have read and view_index_metadata
privileges for the source indices. You must also have index and read privileges for the destination index. When
diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py
index 8558b9920..b2433f09c 100644
--- a/elasticsearch/_async/client/watcher.py
+++ b/elasticsearch/_async/client/watcher.py
@@ -39,8 +39,8 @@ async def ack_watch(
"""
.. raw:: html
-<p>Acknowledge a watch.
-Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
The acknowledgement state of an action is stored in the status.actions.<id>.ack.state structure.
IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.
@@ -101,8 +101,8 @@ async def activate_watch(
        """
        .. raw:: html

-<p>Activate a watch.
-A watch can be either active or inactive.
+Activate a watch.
+A watch can be either active or inactive.
-<p>Deactivate a watch.
-A watch can be either active or inactive.
+Deactivate a watch.
+A watch can be either active or inactive.
-<p>Delete a watch.
-When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.
+<p>Delete a watch.</p>
+When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.
Deleting a watch does not delete any watch execution records related to this watch from the watch history.
IMPORTANT: Deleting a watch must be done by using only this API.
Do not delete the watch directly from the .watches index using the Elasticsearch delete document API
@@ -266,8 +266,8 @@ async def execute_watch(
"""
.. raw:: html
-<p>Run a watch.
-This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
+Run a watch.
+This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.
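A hedged debugging sketch (``client`` assumed; the watch ID is hypothetical):

.. code-block:: python

    resp = await client.watcher.execute_watch(
        id="my_watch",                       # illustrative watch ID
        ignore_condition=True,               # force the actions regardless of the condition
        record_execution=False,              # do not write a record to the watch history
        action_modes={"_all": "simulate"},   # simulate actions instead of running them
    )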
@@ -362,8 +362,8 @@ async def get_settings(
        """
        .. raw:: html

-<p>Get Watcher index settings.
-Get settings for the Watcher internal index (.watches).
+<p>Get Watcher index settings.</p>
+Get settings for the Watcher internal index (.watches).
Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.
Create or update a watch.
- When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
+
Create or update a watch.
+When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
Typically for the schedule trigger, the scheduler is the trigger engine.
IMPORTANT: You must use Kibana or this API to create a watch.
Do not add a watch directly to the .watches index by using the Elasticsearch index API.
@@ -591,8 +591,8 @@ async def query_watches(
"""
.. raw:: html
-<p>Query watches.
-Get all registered watches in a paginated manner and optionally filter watches by a query.
+Query watches.
+Get all registered watches in a paginated manner and optionally filter watches by a query.
Note that only the _id and metadata.* fields are queryable or sortable.
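Sketch of a filtered, sorted watch query (``client`` assumed; the metadata field is an example, and only ``_id`` and ``metadata.*`` may be used here):

.. code-block:: python

    resp = await client.watcher.query_watches(
        query={"term": {"metadata.team": "ops"}},  # metadata.* is queryable
        sort=[{"_id": {"order": "asc"}}],          # _id is sortable
        size=10,
    )
    print(resp["count"])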
Start the watch service. - Start the Watcher service if it is not already running.
+Start the watch service.
+Start the Watcher service if it is not already running.
-<p>Get Watcher statistics.
-This API always returns basic metrics.
+<p>Get Watcher statistics.</p>
+This API always returns basic metrics. You retrieve more metrics by using the metric parameter.
@@ -784,8 +784,8 @@ async def stop(
        """
        .. raw:: html

-<p>Stop the watch service.
-Stop the Watcher service if it is running.
+Stop the watch service.
+Stop the Watcher service if it is running.
-<p>Update Watcher index settings.
-Update settings for the Watcher internal index (.watches).
+<p>Update Watcher index settings.</p>
+Update settings for the Watcher internal index (.watches).
Only a subset of settings can be modified.
This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*,
index.routing.allocation.include.* and index.routing.allocation.require.*.
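A minimal sketch (``client`` assumed; the keyword argument name reflects this client's convention of aliasing the dotted setting name and should be treated as an assumption):

.. code-block:: python

    resp = await client.watcher.update_settings(
        index_auto_expand_replicas="0-4",  # maps to index.auto_expand_replicas (assumed alias)
    )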
diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py
index 710d36cb1..deaccf81f 100644
--- a/elasticsearch/_async/client/xpack.py
+++ b/elasticsearch/_async/client/xpack.py
@@ -45,8 +45,8 @@ async def info(
"""
.. raw:: html
-<p>Get information.
-The information provided by the API includes:
+Get information.
+The information provided by the API includes:
Get usage information. - Get information about the features that are currently enabled and available under the current license. +
Get usage information.
+Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
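Sketch of iterating the usage report (``client`` assumed; the ``available``/``enabled`` keys follow the documented response shape):

.. code-block:: python

    resp = await client.xpack.usage()
    for feature, info in resp.items():
        print(feature, info.get("available"), info.get("enabled"))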
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index bfe8e0928..c3e4e3fb6 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -565,8 +565,8 @@ def bulk(
        """
        .. raw:: html

-<p>Bulk index or delete documents.
-Perform multiple index, create, delete, and update actions in a single request.
+<p>Bulk index or delete documents.</p>
+Perform multiple index, create, delete, and update actions in a single request.
This reduces overhead and can greatly increase indexing speed.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+<p>Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported.</p>
+
+
+
-<p>Clear a scrolling search.
-Clear the search context and results for a scrolling search.
+ + + `Clear a scrolling search. - Clear the search context and results for a scrolling search.
+Clear a scrolling search.
+Clear the search context and results for a scrolling search.
`Close a point in time. - A point in time must be opened explicitly before being used in search requests. +
Close a point in time.
+A point in time must be opened explicitly before being used in search requests.
The keep_alive parameter tells Elasticsearch how long it should persist.
A point in time is automatically closed when the keep_alive period has elapsed.
However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
Count search results. - Get the number of documents matching a query.
+Count search results.
+Get the number of documents matching a query.
The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.
The query is optional. When no query is provided, the API uses match_all to count all the documents.
The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.
@@ -1643,11 +1710,11 @@ def delete_by_query_rethrottle( self, *, task_id: str, + requests_per_second: float, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -1665,9 +1732,13 @@ def delete_by_query_rethrottle( """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") + if requests_per_second is None: + raise ValueError("Empty value passed for parameter 'requests_per_second'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_delete_by_query/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} + if requests_per_second is not None: + __query["requests_per_second"] = requests_per_second if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -1676,8 +1747,6 @@ def delete_by_query_rethrottle( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - if requests_per_second is not None: - __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -1703,8 +1772,8 @@ def delete_script( """ .. raw:: html -Delete a script or search template. - Deletes a stored script or search template.
+Delete a script or search template.
+Deletes a stored script or search template.
`Explain a document match result. - Get information about why a specific document matches, or doesn't match, a query. +
Explain a document match result.
+Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document.
@@ -2419,8 +2488,8 @@ def get_script( """ .. raw:: html -Get a script or search template. - Retrieves a stored script or search template.
+Get a script or search template.
+Retrieves a stored script or search template.
`Get the cluster health. - Get a report with the health status of an Elasticsearch cluster. +
Get the cluster health.
+Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality.
Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status.
@@ -2969,8 +3038,8 @@ def info( """ .. raw:: html -Get cluster info. - Get basic build, version, and cluster information. +
Get cluster info.
+Get basic build, version, and cluster information. ::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.
@@ -3664,8 +3733,8 @@ def put_script( """ .. raw:: html -Create or update a script or search template. - Creates or updates a stored script or search template.
+Create or update a script or search template.
+Creates or updates a stored script or search template.
`Update documents. - Updates documents that match the specified query. +
Update documents.
+Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
This API is a diagnostics API and the output should not be relied upon for building applications.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_balance" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.delete_desired_balance", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def delete_desired_nodes( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Designed for indirect use by ECE/ESS and ECK, direct use is not supported.
+ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_nodes" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.delete_desired_nodes", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def get_desired_balance( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +This API is a diagnostics API and the output should not be relied upon for building applications.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_balance" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.get_desired_balance", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def get_desired_nodes( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Gets the latest desired nodes.
+ + + :param master_timeout: Period to wait for a connection to the master node. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/desired_nodes/_latest" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.get_desired_nodes", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def prevalidate_node_removal( + self, + *, + error_trace: t.Optional[bool] = None, + external_ids: t.Optional[t.Sequence[str]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + ids: t.Optional[t.Sequence[str]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + names: t.Optional[t.Sequence[str]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Prevalidates node removal from the cluster.
+ + + :param external_ids: A comma-separated list of node external IDs to prevalidate + :param ids: A comma-separated list of node IDs to prevalidate + :param master_timeout: Period to wait for a connection to the master node. + :param names: A comma-separated list of node names to prevalidate + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_internal/prevalidate_node_removal" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if external_ids is not None: + __query["external_ids"] = external_ids + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if ids is not None: + __query["ids"] = ids + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if names is not None: + __query["names"] = names + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="_internal.prevalidate_node_removal", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_name="body", + ignore_deprecated_options={"body"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + def update_desired_nodes( + self, + *, + history_id: str, + version: int, + body: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + dry_run: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Designed for indirect use by ECE/ESS and ECK, direct use is not supported.
+ + + :param history_id: The history ID + :param version: The version number + :param body: + :param dry_run: Simulate the update + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if history_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'history_id'") + if version in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'version'") + if body is None and body is None: + raise ValueError( + "Empty value passed for parameters 'body' and 'body', one of them should be set." + ) + elif body is not None and body is not None: + raise ValueError("Cannot set both 'body' and 'body'") + __path_parts: t.Dict[str, str] = { + "history_id": _quote(history_id), + "version": _quote(version), + } + __path = f'/_internal/desired_nodes/{__path_parts["history_id"]}/{__path_parts["version"]}' + __query: t.Dict[str, t.Any] = {} + if dry_run is not None: + __query["dry_run"] = dry_run + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = body if body is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="_internal.update_desired_nodes", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 46b6820ae..3cf5c70e0 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -3301,10 +3301,20 @@ def segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, + allow_closed: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ @@ -3355,6 +3365,8 @@ def segments( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -3378,6 +3390,14 @@ def segments( :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+ :param allow_closed: If true, allow closed indices to be returned in the response + otherwise if false, keep the legacy behaviour of throwing an exception if + index pattern matches closed indices + :param allow_no_indices: If false, the request returns an error if any wildcard + expression, index alias, or _all value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting foo*,bar* returns an error if an index starts + with foo but no index starts with bar. :param bytes: Sets the units for columns that contain a byte-size value. Note that byte-size value units work in terms of powers of 1024. For instance `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are @@ -3386,12 +3406,20 @@ def segments( least `1.0`. If given, byte-size values are rendered as an integer with no suffix, representing the value of the column in the chosen unit. Values that are not an exact multiple of the chosen unit are rounded down. + :param expand_wildcards: Type of index that wildcard expressions can match. If + the request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values, such + as open,hidden. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored + when frozen. + :param ignore_unavailable: If true, missing or closed indices are not included + in the response. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -3416,10 +3444,16 @@ def segments( __path_parts = {} __path = "/_cat/segments" __query: t.Dict[str, t.Any] = {} + if allow_closed is not None: + __query["allow_closed"] = allow_closed + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: @@ -3430,6 +3464,10 @@ def segments( __query["help"] = help if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index 0eec10516..cf1535764 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -125,8 +125,8 @@ def follow( """ .. raw:: html -Create a follower. - Create a cross-cluster replication follower index that follows a specific leader index. +
Create a follower.
+Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
@@ -368,8 +368,8 @@ def forget_follower(
        """
        .. raw:: html

-<p>Forget a follower.
-Remove the cross-cluster replication follower retention leases from the leader.
+Forget a follower.
+Remove the cross-cluster replication follower retention leases from the leader.
A following index takes out retention leases on its leader index.
These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication.
When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed.
@@ -640,8 +640,8 @@ def put_auto_follow_pattern(
        """
        .. raw:: html

-<p>Create or update auto-follow patterns.
-Create a collection of cross-cluster replication auto-follow patterns for a remote cluster.
+<p>Create or update auto-follow patterns.</p>
+Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.
This API can also be used to update auto-follow patterns.
@@ -853,8 +853,8 @@ def resume_follow(
        """
        .. raw:: html

-<p>Resume a follower.
-Resume a cross-cluster replication follower index that was paused.
+<p>Resume a follower.</p>
+Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index.
diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py
index fab832aae..ec402d467 100644
--- a/elasticsearch/_sync/client/cluster.py
+++ b/elasticsearch/_sync/client/cluster.py
@@ -47,8 +47,8 @@ def allocation_explain(
        """
        .. raw:: html

-<p>Explain the shard allocations.
-Get explanations for shard allocations in the cluster.
+<p>Explain the shard allocations.</p>
Delete component templates. - Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+Delete component templates.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
`Clear cluster voting config exclusions. - Remove master-eligible nodes from the voting configuration exclusion list.
+Clear cluster voting config exclusions.
+Remove master-eligible nodes from the voting configuration exclusion list.
`Check component templates. - Returns information about whether a particular component template exists.
+Check component templates.
+Returns information about whether a particular component template exists.
`Get component templates. - Get information about component templates.
+Get component templates.
+Get information about component templates.
`Get cluster info. - Returns basic information about the cluster.
+Get cluster info.
+Returns basic information about the cluster.
`Get the pending cluster tasks. - Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+Get the pending cluster tasks.
+Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.
@@ -674,8 +674,8 @@ def post_voting_config_exclusions( """ .. raw:: html -Update voting configuration exclusions. - Update the cluster voting config exclusions by node IDs or node names. +
Update voting configuration exclusions.
+Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. @@ -757,8 +757,8 @@ def put_component_template( """ .. raw:: html -
Create or update a component template. - Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+Create or update a component template.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
An index template can be composed of multiple component templates.
To use a component template, specify it in an index template’s composed_of list.
Component templates are only applied to new data streams and indices as part of a matching index template.
Reroute the cluster. - Manually change the allocation of individual shards in the cluster. +
Reroute the cluster.
+Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.
It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state.
For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.
Get the cluster state. - Get comprehensive information about the state of the cluster.
+Get the cluster state.
+Get comprehensive information about the state of the cluster.
The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.
The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. @@ -1124,7 +1124,8 @@ def state( when unavailable (missing or closed) :param local: Return local information, do not retrieve the state from master node (default: false) - :param master_timeout: Specify timeout for connection to master + :param master_timeout: Timeout for waiting for new cluster state in case it is + blocked :param wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version @@ -1193,8 +1194,8 @@ def stats( """ .. raw:: html -
Get cluster statistics. - Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
+Get cluster statistics.
+Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
`Deletes a connector secret.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.secret_delete", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def secret_get( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Retrieves a secret stored by Connectors.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.secret_get", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("value",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def secret_post( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + value: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Creates a secret for a Connector.
+ + + :param value: + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_connector/_secret" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.secret_post", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("value",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def secret_put( + self, + *, + id: str, + value: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Creates or updates a secret for a Connector.
+ + + :param id: The ID of the secret + :param value: + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if value is None and body is None: + raise ValueError("Empty value passed for parameter 'value'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_connector/_secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.secret_put", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_cancel( @@ -643,8 +829,8 @@ def sync_job_check_in( """ .. raw:: html -Check in a connector sync job.
- Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.
Check in a connector sync job.
+Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
@@ -700,8 +886,8 @@ def sync_job_claim( """ .. raw:: html -Claim a connector sync job.
- This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time.
+
Claim a connector sync job.
+This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time.
Additionally, it can set the sync_cursor property for the sync job.
This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.
@@ -819,8 +1005,8 @@ def sync_job_error(
        """
        .. raw:: html

-<p>Set a connector sync job error.
-Set the error field for a connector sync job and set its status to error.
+<p>Set a connector sync job error.</p>
+Set the error field for a connector sync job and set its status to error.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
@@ -1086,8 +1272,8 @@ def sync_job_update_stats(
        """
        .. raw:: html

-<p>Set the connector sync job stats.
-Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count.
+<p>Set the connector sync job stats.</p>
Set the connector sync job stats.
+Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count.
You can also update last_seen.
This API is mainly used by the connector service for updating sync job information.
To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. @@ -1402,8 +1588,8 @@ def update_features( """ .. raw:: html -
Update the connector features. - Update the connector features in the connector document. +
Update the connector features.
+Update the connector features in the connector document. This API can be used to control the following aspects of a connector:
Delete a dangling index. - If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +
Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.
Delete an enrich policy. - Deletes an existing enrich policy and its enrich index.
+Delete an enrich policy.
+Deletes an existing enrich policy and its enrich index.
-<p>Run an enrich policy.
-Create the enrich index for an existing enrich policy.
+Run an enrich policy.
+Create the enrich index for an existing enrich policy.
-<p>Get an enrich policy.
-Returns information about an enrich policy.
+Get an enrich policy.
+Returns information about an enrich policy.
-<p>Create an enrich policy.
-Creates an enrich policy.
+Create an enrich policy.
+Creates an enrich policy.
-<p>Get enrich stats.
-Returns enrich coordinator statistics and information about enrich policies that are currently executing.
+Get enrich stats.
+Returns enrich coordinator statistics and information about enrich policies that are currently executing.
-<p>Delete an async EQL search.
-Delete an async EQL search or a stored synchronous EQL search.
+<p>Delete an async EQL search.</p>
+Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.
@@ -89,8 +89,8 @@ def get(
        """
        .. raw:: html

-<p>Get async EQL search results.
-Get the current status and available results for an async EQL search or a stored synchronous EQL search.
+Get async EQL search results.
+Get the current status and available results for an async EQL search or a stored synchronous EQL search.
-<p>Get the async EQL status.
-Get the current status for an async EQL search or a stored synchronous EQL search without returning results.
+Get the async EQL status.
+Get the current status for an async EQL search or a stored synchronous EQL search without returning results.
-<p>Get EQL search results.
-Returns search results for an Event Query Language (EQL) query.
+<p>Get EQL search results.</p>
+Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.
@@ -291,7 +291,7 @@ def search(
            Defaults to 10
        :param tiebreaker_field: Field used to sort hits with the same timestamp in
            ascending order
-        :param timestamp_field: Field containing event timestamp. Default "@timestamp"
+        :param timestamp_field: Field containing event timestamp.
        :param wait_for_completion_timeout:
        """
        if index in SKIP_IN_PATH:
diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py
index 80843c2d3..e544c13ed 100644
--- a/elasticsearch/_sync/client/esql.py
+++ b/elasticsearch/_sync/client/esql.py
@@ -88,8 +88,8 @@ def async_query(
        """
        .. raw:: html

-<p>Run an async ES|QL query.
-Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
+Run an async ES|QL query.
+Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.
@@ -218,8 +218,8 @@ def async_query_delete(
        """
        .. raw:: html

-<p>Delete an async ES|QL query.
-If the query is still running, it is cancelled.
+<p>Delete an async ES|QL query.</p>
+If the query is still running, it is cancelled. Otherwise, the stored results are deleted.
If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:
Get async ES|QL query results. - Get the current status and available results or stored results for an ES|QL asynchronous query. +
Get async ES|QL query results.
+Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
@@ -409,8 +409,8 @@ def get_query(
        """
        .. raw:: html

-<p>Get a specific running ES|QL query information.
-Returns an object extended information about a running ES|QL query.
+Get a specific running ES|QL query information.
+Returns an object extended information about a running ES|QL query.
-<p>Get running ES|QL queries information.
-Returns an object containing IDs and other information about the running ES|QL queries.
+Get running ES|QL queries information.
+Returns an object containing IDs and other information about the running ES|QL queries.
-<p>Run an ES|QL query.
-Get search results for an ES|QL (Elasticsearch query language) query.
+Run an ES|QL query.
+Get search results for an ES|QL (Elasticsearch query language) query.
-<p>Get the features.
-Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot.
+<p>Get the features.</p>
+Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot.
You can use this API to determine which feature states to include when taking a snapshot.
By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.
A feature state includes one or more system indices necessary for a given feature to function.
@@ -89,8 +89,8 @@ def reset_features(
        """
        .. raw:: html

-<p>Reset the features.
-Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
+Reset the features.
+Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
WARNING: Intended for development and testing use only. Do not reset features on a production cluster.
Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices.
diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 44178398d..1be131fdb 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -31,6 +31,90 @@ class FleetClient(NamespacedClient): + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def delete_secret( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Deletes a secret stored by Fleet.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_fleet/secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="fleet.delete_secret", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def get_secret( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Retrieves a secret stored by Fleet.
+ + + :param id: The ID of the secret + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_fleet/secret/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="fleet.get_secret", + path_parts=__path_parts, + ) + @_rewrite_parameters() def global_checkpoints( self, @@ -138,8 +222,8 @@ def msearch( """ .. raw:: html -Run multiple Fleet searches. - Run several Fleet searches with a single API request. +
Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
The API follows the same structure as the multi search API.
However, similar to the Fleet search API, it supports the wait_for_checkpoints parameter.
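A hedged sketch of the multi-search call shape, mirroring the header/body pairing of the regular _msearch API; the index and queries are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Each search is a header/body pair, as in the multi search API;
# an empty header falls back to the index given in the URL.
resp = es.fleet.msearch(
    index="my-index",
    searches=[
        {},
        {"query": {"match_all": {}}},
        {},
        {"query": {"term": {"status": "active"}}},
    ],
)
print(resp)
```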
Creates a secret stored by Fleet.
+ + + :param value: + """ + if value is None and body is None: + raise ValueError("Empty value passed for parameter 'value'") + __path_parts: t.Dict[str, str] = {} + __path = "/_fleet/secret" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if value is not None: + __body["value"] = value + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="fleet.post_secret", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "aggregations", @@ -388,8 +522,8 @@ def search( """ .. raw:: html -Run a Fleet search. - The purpose of the Fleet search API is to provide an API where the search will be run only +
Run a Fleet search.
+The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index 735917b80..e7ede6586 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -47,8 +47,8 @@ def explore( """ .. raw:: html -Explore graph analytics. - Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. +
Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
Subsequent requests enable you to spider out from one or more vertices of interest.</p>
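A minimal seed request matching the description above, with one vertex field and one connection field; all index and field names are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# The seed query selects documents; vertices and connections define the graph.
resp = es.graph.explore(
    index="my-index",
    query={"match": {"query.raw": "midi"}},
    vertices=[{"field": "product"}],
    connections={"vertices": [{"field": "query.raw"}]},
)
print(resp["vertices"], resp["connections"])
```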
diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py
index 26424284a..33400218e 100644
--- a/elasticsearch/_sync/client/ilm.py
+++ b/elasticsearch/_sync/client/ilm.py
@@ -40,8 +40,8 @@ def delete_lifecycle(
"""
.. raw:: html
-
Delete a lifecycle policy. - You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
`Explain the lifecycle state. - Get the current lifecycle status for one or more indices. +
Explain the lifecycle state.
+Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
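A sketch of reading that status back per index; the phase/action/step fields follow the documented explain output, and the index pattern is illustrative:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# The response is keyed by index name under "indices".
resp = es.ilm.explain_lifecycle(index="my-index-*")
for name, info in resp["indices"].items():
    print(name, info.get("phase"), info.get("action"), info.get("step"))
```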
@@ -260,8 +260,8 @@ def migrate_to_data_tiers( """ .. raw:: html -Migrate to data tiers routing. - Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +
Migrate to data tiers routing.
+Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers.
Migrating away from custom node attributes routing can be manually performed. @@ -341,8 +341,8 @@ def move_to_step( """ .. raw:: html -
Move to a lifecycle step. - Manually move an index into a specific step in the lifecycle policy and run that step.
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.
WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.
You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index @@ -417,8 +417,8 @@ def put_lifecycle( """ .. raw:: html -
Create or update a lifecycle policy. - If the specified policy exists, it is replaced and the policy version is incremented.
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.</p>
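A small example policy to make the create-or-update semantics concrete — hot rollover plus a delayed delete; the thresholds are arbitrary:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Re-running this call with changes bumps the stored policy version.
es.ilm.put_lifecycle(
    name="my-policy",
    policy={
        "phases": {
            "hot": {
                "actions": {
                    "rollover": {"max_primary_shard_size": "50gb", "max_age": "30d"}
                }
            },
            "delete": {"min_age": "90d", "actions": {"delete": {}}},
        }
    },
)
```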
@@ -481,8 +481,8 @@ def remove_policy( """ .. raw:: html -Remove policies from an index. - Remove the assigned lifecycle policies from an index or a data stream's backing indices. +
Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.
@@ -526,8 +526,8 @@ def retry( """ .. raw:: html -Retry a policy. - Retry running the lifecycle policy for an index that is in the ERROR step. +
Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
@@ -574,8 +574,8 @@ def start( """ .. raw:: html -Start the ILM plugin. - Start the index lifecycle management plugin if it is currently stopped. +
Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.
@@ -627,8 +627,8 @@ def stop( """ .. raw:: html -Stop the ILM plugin. - Halt all lifecycle management operations and stop the index lifecycle management plugin. +
Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index e40be2cd7..35f321234 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -165,8 +165,8 @@ def analyze( """ .. raw:: html -Get tokens from text analysis. - The analyze API performs analysis on a text string and returns the resulting tokens.
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.
Generating an excessive amount of tokens may cause a node to run out of memory.</p>
The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced.
If more than this limit of tokens gets generated, an error occurs.
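A quick tokenization sanity check against the standard analyzer; no index is required for this form of the call:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = es.indices.analyze(analyzer="standard", text="The QUICK brown foxes")
print([t["token"] for t in resp["tokens"]])
# expected: ['the', 'quick', 'brown', 'foxes']
```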
@@ -319,8 +319,8 @@ def clear_cache(
"""
.. raw:: html
-
Clear the cache. - Clear the cache of one or more indices. +
Clear the cache.
+Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices.
By default, the clear cache API clears all caches.
To clear only specific caches, use the fielddata, query, or request parameters.
@@ -412,8 +412,8 @@ def clone(
"""
.. raw:: html
-
Clone an index. - Clone an existing index into a new index. +
Clone an index.
+Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index.
IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. @@ -537,8 +537,8 @@ def close( """ .. raw:: html -
Close an index. - A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +
Close an index.
+A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.
When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. @@ -634,8 +634,8 @@ def create( """ .. raw:: html -
Create an index. - You can use the create index API to add a new index to an Elasticsearch cluster. +
Create an index.
+You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following:
Delete indices. - Deleting an index deletes its documents, shards, and metadata. +
Delete indices.
+Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards.
You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. @@ -1008,8 +1008,8 @@ def delete_alias( """ .. raw:: html -
Delete an alias. - Removes a data stream or index from an alias.
+Delete an alias.
+Removes a data stream or index from an alias.
`Delete data stream lifecycles. - Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
+Delete data stream lifecycles.
+Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
`Delete data streams. - Deletes one or more data streams and their backing indices.
+Delete data streams.
+Deletes one or more data streams and their backing indices.
`Delete data stream options. - Removes the data stream options from a data stream.
+Delete data stream options.
+Removes the data stream options from a data stream.
`Delete an index template. - The provided may contain multiple template names separated by a comma. If multiple template +
Delete an index template.
+The provided may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.
@@ -1319,8 +1319,8 @@ def delete_template( """ .. raw:: html -Delete a legacy index template. - IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+Delete a legacy index template.
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
`Analyze the index disk usage. - Analyze the disk usage of each field of an index or data stream. +
Analyze the index disk usage.
+Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
@@ -1469,8 +1469,8 @@ def downsample(
"""
.. raw:: html
-
Downsample an index.
- Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
+
Downsample an index.
+Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
All documents within an hour interval are summarized and stored as a single document in the downsample index.
NOTE: Only indices in a time series data stream are supported. @@ -1546,8 +1546,8 @@ def exists( """ .. raw:: html -
Check indices. - Check if one or more indices, index aliases, or data streams exist.
+Check indices.
+Check if one or more indices, index aliases, or data streams exist.
`Check existence of index templates. - Get information about whether index templates exist. +
Check existence of index templates.
+Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
@@ -1826,8 +1826,8 @@ def explain_data_lifecycle( """ .. raw:: html -Get the status for a data stream lifecycle. - Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
+Get the status for a data stream lifecycle.
+Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
`Get field usage stats. - Get field usage information for each shard and field of an index. +
Get field usage stats.
+Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.
The response body reports the per-shard usage count of the data structures that back the fields in the index. @@ -1971,8 +1971,8 @@ def flush( """ .. raw:: html -
Flush data streams or indices. - Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +
Flush data streams or indices.
+Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.
After each operation has been flushed it is permanently stored in the Lucene index. @@ -2065,8 +2065,8 @@ def forcemerge( """ .. raw:: html -
Force a merge. - Perform the force merge operation on the shards of one or more indices. +
Force a merge.
+Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices.
Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.
@@ -2200,8 +2200,8 @@ def get( """ .. raw:: html -Get index information. - Get information about one or more indices. For data streams, the API returns information about the +
Get index information.
+Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
@@ -2294,8 +2294,8 @@ def get_alias( """ .. raw:: html -Get aliases. - Retrieves information for one or more data stream or index aliases.
+Get aliases.
+Retrieves information for one or more data stream or index aliases.
`Get data stream lifecycle stats. - Get statistics about the data streams that are managed by a data stream lifecycle.
+Get data stream lifecycle stats.
+Get statistics about the data streams that are managed by a data stream lifecycle.
`Get mapping definitions. - Retrieves mapping definitions for one or more fields. +
Get mapping definitions.
+Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices.
This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
@@ -2761,14 +2761,14 @@ def get_index_template( """ .. raw:: html -Get index templates. - Get information about one or more index templates.
+Get index templates.
+Get information about one or more index templates.
`Get mapping definitions. - For data streams, the API retrieves mappings for the stream’s backing indices.
+Get mapping definitions.
+For data streams, the API retrieves mappings for the stream’s backing indices.
`Get index settings. - Get setting information for one or more indices. +
Get index settings.
+Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices.
@@ -3063,8 +3063,8 @@ def get_template( """ .. raw:: html -Get legacy index templates. - Get information about one or more index templates.
+Get legacy index templates.
+Get information about one or more index templates.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
@@ -3183,8 +3183,8 @@ def migrate_to_data_stream( """ .. raw:: html -Convert an index alias to a data stream. - Converts an index alias to a data stream. +
Convert an index alias to a data stream.
+Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; @@ -3248,8 +3248,8 @@ def modify_data_stream( """ .. raw:: html -
Update data streams. - Performs one or more data stream modification actions in a single atomic operation.
+Update data streams.
+Performs one or more data stream modification actions in a single atomic operation.
`Open a closed index. - For data streams, the API opens any closed backing indices.
+Open a closed index.
+For data streams, the API opens any closed backing indices.
A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
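The caller's side of reopening, as a minimal sketch; the index name is a placeholder and wait_for_active_shards is optional:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = es.indices.open(index="my-closed-index", wait_for_active_shards=1)
print(resp["acknowledged"], resp["shards_acknowledged"])
```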
@@ -3406,8 +3406,8 @@ def promote_data_stream( """ .. raw:: html -Promote a data stream. - Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.
+Promote a data stream.
+Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.
With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. @@ -3479,8 +3479,8 @@ def put_alias( """ .. raw:: html -
Create or update an alias. - Adds a data stream or index to an alias.
+Create or update an alias.
+Adds a data stream or index to an alias.
`Update data stream lifecycles. - Update the data stream lifecycle of the specified data streams.
+Update data stream lifecycles.
+Update the data stream lifecycle of the specified data streams.
`Update data stream options. - Update the data stream options of the specified data streams.
+Update data stream options.
+Update the data stream options of the specified data streams.
`Create or update an index template. - Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. @@ -4039,8 +4039,8 @@ def put_mapping( """ .. raw:: html -
Update field mappings. - Add new fields to an existing data stream or index. +
Update field mappings.
+Add new fields to an existing data stream or index. You can use the update mapping API to:
Update index settings. - Changes dynamic index settings in real time. +
Update index settings.
+Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.
To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. @@ -4338,8 +4338,8 @@ def put_template( """ .. raw:: html -
Create or update a legacy index template. - Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +
Create or update a legacy index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
Composable templates always take precedence over legacy templates. @@ -4446,8 +4446,8 @@ def recovery( """ .. raw:: html -
Get index recovery information. - Get information about ongoing and completed shard recoveries for one or more indices. +
Get index recovery information.
+Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices.
All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.
Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. @@ -4544,8 +4544,8 @@ def refresh( """ .. raw:: html -
Refresh an index. - A refresh makes recent operations performed on one or more indices available for search. +
Refresh an index.
+A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices.
By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.
You can change this default interval with the index.refresh_interval setting.
Reload search analyzers. - Reload an index's search analyzers and their resources. +
Reload search analyzers.
+Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices.
IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.
You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer.
@@ -4943,8 +4943,8 @@ def resolve_index(
"""
.. raw:: html
-
Resolve indices. - Resolve the names and/or index patterns for indices, aliases, and data streams. +
Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
@@ -5022,8 +5022,8 @@ def rollover( """ .. raw:: html -Roll over to a new index. - TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
+Roll over to a new index.
+TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target.
Roll over a data stream
@@ -5159,8 +5159,8 @@ def segments( """ .. raw:: html -Get index segments. - Get low-level information about the Lucene segments in index shards. +
Get index segments.
+Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
@@ -5240,8 +5240,8 @@ def shard_stores( """ .. raw:: html -Get index shard stores. - Get store information about replica shards in one or more indices. +
Get index shard stores.
+Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices.
The index shard stores API returns the following information:
Shrink an index. - Shrink an index into a new index with fewer primary shards.
+Shrink an index.
+Shrink an index into a new index with fewer primary shards.
Before you can shrink an index:
Simulate an index. - Get the index configuration that would be applied to the specified index from an existing index template.
+Simulate an index.
+Get the index configuration that would be applied to the specified index from an existing index template.
`Simulate an index template. - Get the index configuration that would be applied by a particular index template.
+Simulate an index template.
+Get the index configuration that would be applied by a particular index template.
`Split an index. - Split an index into a new index with more primary shards.
+Split an index.
+Split an index into a new index with more primary shards.
Before you can split an index:
@@ -5803,8 +5803,8 @@ def stats( """ .. raw:: html -Get index statistics. - For data streams, the API retrieves statistics for the stream's backing indices.
+Get index statistics.
+For data streams, the API retrieves statistics for the stream's backing indices.
By default, the returned statistics are index-level with primaries and total aggregations.
primaries are the values for only the primary shards.
total are the accumulated values for both primary and replica shards.
Create or update an alias. - Adds a data stream or index to an alias.
+Create or update an alias.
+Adds a data stream or index to an alias.
`Validate a query. - Validates a query without running it.
+Validate a query.
+Validates a query without running it.
`Perform completion inference on the service
+Perform completion inference on the service.
`Delete an inference endpoint
+Delete an inference endpoint.
`Get an inference endpoint
+Get an inference endpoint.
`Perform reranking inference on the service
+Perform reranking inference on the service.
`Perform sparse embedding inference on the service
+Perform sparse embedding inference on the service.
`Perform text embedding inference on the service
+Perform text embedding inference on the service.
`Delete pipelines. - Delete one or more ingest pipelines.
+Delete pipelines.
+Delete one or more ingest pipelines.
`Get GeoIP statistics. - Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
+Get GeoIP statistics.
+Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
`Run a grok processor. - Extract structured fields out of a single text field within a document. +
Run a grok processor.
+Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
@@ -609,8 +609,8 @@ def put_pipeline( """ .. raw:: html -Create or update a pipeline. - Changes made using this API take effect immediately.
+Create or update a pipeline.
+Changes made using this API take effect immediately.
`Start a trial. - Start a 30-day trial, which gives access to all subscription features.
+Start a trial.
+Start a 30-day trial, which gives access to all subscription features.
NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.
To check the status of your trial, use the get trial status API.
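A sketch of the two calls mentioned here; passing acknowledge=True is what actually activates the trial:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = es.license.post_start_trial(acknowledge=True)
print(resp["trial_was_started"])

# Afterwards, the trial status API reports the remaining eligibility.
print(es.license.get_trial_status())
```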
diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index ae8e2a1dc..1241a7d26 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -38,8 +38,8 @@ def delete_pipeline( """ .. raw:: html -Delete a Logstash pipeline. - Delete a pipeline that is used for Logstash Central Management. +
Delete a Logstash pipeline.
+Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code.
@@ -83,8 +83,8 @@ def get_pipeline( """ .. raw:: html -Get Logstash pipelines. - Get pipelines that are used for Logstash Central Management.
+Get Logstash pipelines.
+Get pipelines that are used for Logstash Central Management.
`Get deprecation information. - Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
TIP: This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
@@ -87,8 +87,8 @@ def get_feature_upgrade_status( """ .. raw:: html -Get feature migration information. - Version upgrades sometimes require changes to how features store configuration information and data in system indices. +
Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.
TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.
@@ -129,8 +129,8 @@ def post_feature_upgrade( """ .. raw:: html -Start the feature migration. - Version upgrades sometimes require changes to how features store configuration information and data in system indices. +
Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process.
Some functionality might be temporarily unavailable during the migration process.
TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index a5e98142a..4c5577511 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -1101,8 +1101,8 @@ def flush_job( """ .. raw:: html -Force buffered data to be processed. - The flush jobs API is only applicable when sending data for analysis using +
Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send @@ -1269,8 +1269,8 @@ def get_buckets( """ .. raw:: html -
Get anomaly detection job results for buckets. - The API presents a chronological view of the records, grouped by bucket.
+Get anomaly detection job results for buckets.
+The API presents a chronological view of the records, grouped by bucket.
`Get data frame analytics job configuration info. - You can get information for multiple data frame analytics jobs in a single +
Get data frame analytics job configuration info.
+You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
@@ -1744,8 +1744,8 @@ def get_datafeed_stats( """ .. raw:: html -Get datafeed stats. - You can get statistics for multiple datafeeds in a single API request by +
Get datafeed stats.
+You can get statistics for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get statistics for all datafeeds by using _all, by specifying * as the
<feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the
@@ -1809,8 +1809,8 @@ def get_datafeeds(
"""
.. raw:: html
-
Get datafeeds configuration info. - You can get information for multiple datafeeds in a single API request by +
Get datafeeds configuration info.
+You can get information for multiple datafeeds in a single API request by
using a comma-separated list of datafeeds or a wildcard expression. You can
get information for all datafeeds by using _all, by specifying * as the
<feed_id>, or by omitting the <feed_id>.
@@ -1880,8 +1880,8 @@ def get_filters(
"""
.. raw:: html
-
Get filters. - You can get a single filter or all filters.
+Get filters.
+You can get a single filter or all filters.
`Get anomaly detection job results for influencers. - Influencers are the entities that have contributed to, or are to blame for, +
Get anomaly detection job results for influencers.
+Influencers are the entities that have contributed to, or are to blame for,
the anomalies. Influencer results are available only if an
influencer_field_name is specified in the job configuration.
Get anomaly detection jobs configuration info. - You can get information for multiple anomaly detection jobs in a single API +
Get anomaly detection jobs configuration info.
+You can get information for multiple anomaly detection jobs in a single API
request by using a group name, a comma-separated list of jobs, or a wildcard
expression. You can get information for all anomaly detection jobs by using
_all, by specifying * as the <job_id>, or by omitting the <job_id>.
Get machine learning memory usage info. - Get information about how machine learning jobs and trained models are using memory, +
Get machine learning memory usage info.
+Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
@@ -2515,8 +2515,8 @@ def get_records( """ .. raw:: html -Get anomaly records for an anomaly detection job. - Records contain the detailed analytical results. They describe the anomalous +
Get anomaly records for an anomaly detection job.
+Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size @@ -2708,8 +2708,8 @@ def get_trained_models_stats( """ .. raw:: html -
Get trained models usage info. - You can get usage information for multiple trained +
Get trained models usage info.
+You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
@@ -2836,8 +2836,8 @@ def info( """ .. raw:: html -Get machine learning information. - Get defaults and limits used by machine learning. +
Get machine learning information.
+Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be @@ -3076,8 +3076,8 @@ def preview_data_frame_analytics( """ .. raw:: html -
Preview features used by data frame analytics. - Preview the extracted features used by a data frame analytics config.
+Preview features used by data frame analytics.
+Preview the extracted features used by a data frame analytics config.
`Preview a datafeed. - This API returns the first "page" of search results from a datafeed. +
Preview a datafeed.
+This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. @@ -3364,8 +3364,8 @@ def put_data_frame_analytics( """ .. raw:: html -
Create a data frame analytics job. - This API creates a data frame analytics job that performs an analysis on the +
Create a data frame analytics job.
+This API creates a data frame analytics job that performs an analysis on the
source indices and stores the outcome in a destination index.
By default, the query used in the source configuration is {"match_all": {}}.
If the destination index does not exist, it is created automatically when you start the job.
@@ -3545,8 +3545,8 @@ def put_datafeed( """ .. raw:: html -Create a datafeed. - Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. +
Create a datafeed.
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
You can associate only one datafeed with each anomaly detection job.
The datafeed contains a query that runs at a defined interval (frequency).
If you are concerned about delayed data, you can add a delay (query_delay') at each interval. By default, the datafeed uses the following query: {"match_all": {"boost": 1}}`.
Create a filter. - A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +
Create a filter.
+A filter contains a list of strings. It can be used by one or more anomaly detection jobs.
Specifically, filters are referenced in the custom_rules property of detector configuration objects.
Create a trained model. - Enable you to supply a trained model that is not created by data frame analytics.
+Create a trained model.
+Enable you to supply a trained model that is not created by data frame analytics.
`Create or update a trained model alias. - A trained model alias is a logical name used to reference a single trained +
Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference @@ -4282,8 +4282,8 @@ def put_trained_model_vocabulary( """ .. raw:: html -
Create a trained model vocabulary. - This API is supported only for natural language processing (NLP) models. +
Create a trained model vocabulary.
+This API is supported only for natural language processing (NLP) models.
The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.
Reset an anomaly detection job. - All model state and results are deleted. The job is ready to start over as if +
Reset an anomaly detection job.
+All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list.
@@ -4404,8 +4404,8 @@ def revert_model_snapshot( """ .. raw:: html -Revert to a snapshot. - The machine learning features react quickly to anomalous input, learning new +
Revert to a snapshot.
+The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a @@ -4474,8 +4474,8 @@ def set_upgrade_mode( """ .. raw:: html -
Set upgrade_mode for ML indices. - Sets a cluster wide upgrade_mode setting that prepares machine learning +
Set upgrade_mode for ML indices.
+Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, @@ -4535,8 +4535,8 @@ def start_data_frame_analytics( """ .. raw:: html -
Start a data frame analytics job. - A data frame analytics job can be started and stopped multiple times +
Start a data frame analytics job.
+A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The @@ -4685,8 +4685,8 @@ def start_trained_model_deployment( """ .. raw:: html -
Start a trained model deployment. - It allocates the model to every machine learning node.
+Start a trained model deployment.
+It allocates the model to every machine learning node.
`Stop data frame analytics jobs. - A data frame analytics job can be started and stopped multiple times +
Stop data frame analytics jobs.
+A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
@@ -4855,8 +4855,8 @@ def stop_datafeed( """ .. raw:: html -Stop datafeeds. - A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +
Stop datafeeds.
+A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
@@ -5101,8 +5101,8 @@ def update_datafeed( """ .. raw:: html -Update a datafeed. - You must stop and start the datafeed for the changes to be applied. +
Update a datafeed.
+You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
@@ -5265,8 +5265,8 @@ def update_filter( """ .. raw:: html -Update a filter. - Updates the description of a filter, adds items, or removes items from the list.
+Update a filter.
+Updates the description of a filter, adds items, or removes items from the list.
`Update an anomaly detection job. - Updates certain properties of an anomaly detection job.
+Update an anomaly detection job.
+Updates certain properties of an anomaly detection job.
`Update a snapshot. - Updates certain properties of a snapshot.
+Update a snapshot.
+Updates certain properties of a snapshot.
`Upgrade a snapshot. - Upgrade an anomaly detection model snapshot to the latest major version. +
Upgrade a snapshot.
+Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 59cee2235..ccd8f30b1 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -44,8 +44,8 @@ def bulk( """ .. raw:: html -
Send monitoring data. - This API is used by the monitoring features to send monitoring data.
+Send monitoring data.
+This API is used by the monitoring features to send monitoring data.
`Clear the archived repositories metering. - Clear the archived repositories metering information in the cluster.
+Clear the archived repositories metering.
+Clear the archived repositories metering information in the cluster.
`Get cluster repositories metering. - Get repositories metering information for a cluster. +
Get cluster repositories metering.
+Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
@@ -157,8 +157,8 @@ def hot_threads( """ .. raw:: html -Get the hot threads for nodes. - Get a breakdown of the hot threads on each selected node in the cluster. +
Get the hot threads for nodes.
+Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
@@ -378,8 +378,8 @@ def stats( """ .. raw:: html -Get node statistics. - Get statistics for nodes in a cluster. +
Get node statistics.
+Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
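A sketch of narrowing the stats payload with the metric path parameter; the metric list and printed fields are illustrative:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = es.nodes.stats(metric="jvm,os")
for node_id, node in resp["nodes"].items():
    print(node["name"], node["jvm"]["mem"]["heap_used_percent"])
```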
diff --git a/elasticsearch/_sync/client/profiling.py b/elasticsearch/_sync/client/profiling.py new file mode 100644 index 000000000..dee9fbb51 --- /dev/null +++ b/elasticsearch/_sync/client/profiling.py @@ -0,0 +1,177 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +class C: + + @_rewrite_parameters( + body_name="conditions", + ) + def flamegraph( + self, + *, + conditions: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +Returns basic information about the status of Universal Profiling.
+ + + `Extracts raw stacktrace information from Universal Profiling.
+ + + `Returns basic information about the status of Universal Profiling.
+ + + `Delete a query rule. - Delete a query rule within a query ruleset. +
Delete a query rule.
+Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
@@ -92,8 +92,8 @@ def delete_ruleset( """ .. raw:: html -Delete a query ruleset. - Remove a query ruleset and its associated data. +
Delete a query ruleset.
+Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.
@@ -138,8 +138,8 @@ def get_rule( """ .. raw:: html -Get a query rule. - Get details about a query rule within a query ruleset.
+Get a query rule.
+Get details about a query rule within a query ruleset.
`Get a query ruleset. - Get details about a query ruleset.
+Get a query ruleset.
+Get details about a query ruleset.
`Get all query rulesets. - Get summarized information about the query rulesets.
+Get all query rulesets.
+Get summarized information about the query rulesets.
`Create or update a query rule. - Create or update a query rule within a query ruleset.
+Create or update a query rule.
+Create or update a query rule within a query ruleset.
IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. @@ -380,8 +380,8 @@ def put_ruleset( """ .. raw:: html -
Create or update a query ruleset. - There is a limit of 100 rules per ruleset. +
Create or update a query ruleset.
+There is a limit of 100 rules per ruleset.
This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.
IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in single rule.
It is advised to use one or the other in query rulesets, to avoid errors.
@@ -442,8 +442,8 @@ def test(
"""
.. raw:: html
-
Test a query ruleset. - Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
+Test a query ruleset.
+Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
`Get rollup job information. - Get the configuration, stats, and status of rollup jobs.
+Get rollup job information.
+Get the configuration, stats, and status of rollup jobs.
NOTE: This API returns only active (both STARTED and STOPPED) jobs.
If a job was created, ran for a while, then was deleted, the API does not return any details about it.
For details about a historical rollup job, the rollup capabilities API may be more useful.
Get the rollup job capabilities. - Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
+Get the rollup job capabilities.
+Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine:
@@ -216,8 +216,8 @@ def get_rollup_index_caps( """ .. raw:: html -Get the rollup index capabilities. - Get the rollup capabilities of all jobs inside of a rollup index. +
Get the rollup index capabilities.
+Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:
Search rolled-up data. - The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +
Search rolled-up data.
+The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.
The request body supports a subset of features from the regular search API. The following functionality is not available:
@@ -495,8 +495,8 @@ def start_job( """ .. raw:: html -Start rollup jobs. - If you try to start a job that does not exist, an exception occurs. +
Start rollup jobs.
+If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens.
@@ -543,8 +543,8 @@ def stop_job( """ .. raw:: html -Stop rollup jobs. - If you try to stop a job that does not exist, an exception occurs. +
Stop rollup jobs.
+If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens.
Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped.
This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:
Delete a behavioral analytics collection. - The associated data stream is also deleted.
+Delete a behavioral analytics collection.
+The associated data stream is also deleted.
`Get search applications. - Get information about search applications.
+Get search applications.
+Get information about search applications.
`Render a search application query. - Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +
Render a search application query.
+Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified.
If a parameter used in the search template is not specified in params, the parameter's default value will be used.
The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.
You must have read privileges on the backing alias of the search application.
Run a search application search. - Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. +
Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.</p>
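As a sketch, template parameters are passed through params, and anything omitted falls back to the template defaults; the application name and parameters are invented:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = es.search_application.search(
    name="my-search-app",
    params={"query_string": "rock climbing", "size": 5},
)
print(resp["hits"]["total"])
```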
diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 2160988c0..835a67e3b 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -46,8 +46,8 @@ def cache_stats( """ .. raw:: html -Get cache statistics. - Get statistics about the shared cache for partially mounted indices.
+Get cache statistics.
+Get statistics about the shared cache for partially mounted indices.
`Clear the cache. - Clear indices and data streams from the shared cache for partially mounted indices.
+Clear the cache.
+Clear indices and data streams from the shared cache for partially mounted indices.
`Mount a snapshot. - Mount a snapshot as a searchable snapshot index. +
Mount a snapshot.
+Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.
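A sketch of mounting outside of ILM control, per the warning above; the repository, snapshot, and index names are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# storage="shared_cache" gives a partially mounted index, which is
# what the shared-cache statistics endpoint above reports on.
es.searchable_snapshots.mount(
    repository="my-repository",
    snapshot="my-snapshot",
    index="my-index",
    renamed_index="my-mounted-index",
    storage="shared_cache",
    wait_for_completion=True,
)
```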
diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 2672a7951..5c84be7f8 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -288,8 +288,8 @@ def bulk_update_api_keys( """ .. raw:: html -Bulk update API keys. - Update the attributes for multiple API keys.
+Bulk update API keys.
+Update the attributes for multiple API keys.
IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.
This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.
It is not possible to update expired or invalidated API keys.
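To make the credential constraint concrete, a sketch of a bulk update authenticated as the keys' owner rather than with an API key (the key IDs and metadata are hypothetical):

    from elasticsearch import Elasticsearch

    # API keys cannot authenticate this call; use the owner's credentials.
    client = Elasticsearch("http://localhost:9200", basic_auth=("elastic", "changeme"))

    # Apply one metadata update to several API keys in a single call.
    client.security.bulk_update_api_keys(
        ids=["api-key-id-1", "api-key-id-2"],  # hypothetical key IDs
        metadata={"environment": "production"},
    )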
@@ -3711,7 +3711,8 @@ def query_role( :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`. + :param sort: The sort definition. You can sort on `name`, `description`, `metadata`, + `applications.application`, `applications.privileges`, and `applications.resources`. In addition, sort can also be applied to the `_doc` field to sort by index order. """ diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index 28b360ca3..1987b0012 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -40,8 +40,8 @@ def delete_node( """ .. raw:: html -Cancel node shutdown preparations. - Remove a node from the shutdown list so it can resume normal operations. +
Cancel node shutdown preparations.
+Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.
NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index 5f22ae433..02342eef7 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -66,8 +66,8 @@ def ingest( """ .. raw:: html -
Simulate data ingestion. - Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
+Simulate data ingestion.
+Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.
The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, in the same way that a non-simulated ingest would. diff --git a/elasticsearch/_sync/client/slm.py index 9b701de80..c4a247132 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -40,8 +40,8 @@ def delete_lifecycle( """ .. raw:: html -
Delete a policy. - Delete a snapshot lifecycle policy definition. +
Delete a policy.
+Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.
@@ -96,8 +96,8 @@ def execute_lifecycle( """ .. raw:: html -Run a policy. - Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +
Run a policy.
+Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
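A one-line sketch of triggering the policy ahead of maintenance (the policy id is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Take the snapshot now instead of waiting for the policy's schedule.
    resp = client.slm.execute_lifecycle(policy_id="nightly-snapshots")
    print(resp["snapshot_name"])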
@@ -151,8 +151,8 @@ def execute_retention( """ .. raw:: html -Run a retention policy. - Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +
Run a retention policy.
+Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.
@@ -204,8 +204,8 @@ def get_lifecycle( """ .. raw:: html -Get policy information. - Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
+Get policy information.
+Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
`Get snapshot lifecycle management statistics. - Get global and policy-level statistics about actions taken by snapshot lifecycle management.
+Get snapshot lifecycle management statistics.
+Get global and policy-level statistics about actions taken by snapshot lifecycle management.
`Create or update a policy. - Create or update a snapshot lifecycle policy. +
Create or update a policy.
+Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored.
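For reference, a hedged sketch of registering such a policy (schedule, repository, and retention values are illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Creating the policy again with new settings increments its version.
    client.slm.put_lifecycle(
        policy_id="nightly-snapshots",
        schedule="0 30 1 * * ?",  # 01:30 every night
        name="<nightly-snap-{now/d}>",
        repository="my_repository",
        config={"indices": ["*"], "include_global_state": True},
        retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
    )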
@@ -460,8 +460,8 @@ def start( """ .. raw:: html -Start snapshot lifecycle management. - Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +
Start snapshot lifecycle management.
+Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
@@ -514,8 +514,8 @@ def stop( """ .. raw:: html -Stop snapshot lifecycle management. - Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +
Stop snapshot lifecycle management.
+Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 5a3a542c0..484ef009f 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -46,8 +46,8 @@ def cleanup_repository( """ .. raw:: html -Clean up the snapshot repository. - Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
+Clean up the snapshot repository.
+Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
`Clone a snapshot. - Clone part of all of a snapshot into another snapshot in the same repository.
+Clone a snapshot.
+Clone part of all of a snapshot into another snapshot in the same repository.
`Create a snapshot. - Take a snapshot of a cluster or of data streams and indices.
+Create a snapshot.
+Take a snapshot of a cluster or of data streams and indices.
`Create or update a snapshot repository. - IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. +
Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
To register a snapshot repository, the cluster's global metadata must be writeable.
Ensure there are no cluster blocks (for example, cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.</p>
Several options for this API can be specified using a query parameter or a request body parameter. @@ -470,8 +470,8 @@ def delete_repository( """ .. raw:: html -
Delete snapshot repositories. - When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +
Delete snapshot repositories.
+When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place.
@@ -971,8 +971,8 @@ def repository_verify_integrity( """ .. raw:: html -Verify the repository integrity. - Verify the integrity of the contents of a snapshot repository.
+Verify the repository integrity.
+Verify the integrity of the contents of a snapshot repository.
This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.
If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity.
Until you do so:
Restore a snapshot. - Restore a snapshot of a cluster or data streams and indices.
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible.
@@ -1259,8 +1259,8 @@ def status( """ .. raw:: html -Get the snapshot status. - Get a detailed description of the current state for each shard participating in the snapshot.
+Get the snapshot status.
+Get a detailed description of the current state for each shard participating in the snapshot.
Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
If you omit the <snapshot> request path parameter, the request retrieves information only for currently running snapshots.
@@ -1337,8 +1337,8 @@ def verify_repository(
"""
.. raw:: html
-
Verify a snapshot repository. - Check for common misconfigurations in a snapshot repository.
+Verify a snapshot repository.
+Check for common misconfigurations in a snapshot repository.
`Delete an async SQL search. - Delete an async SQL search or a stored synchronous SQL search. +
Delete an async SQL search.
+Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.
If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:
Get async SQL search results. - Get the current status and available results for an async SQL search or stored synchronous SQL search.
+Get async SQL search results.
+Get the current status and available results for an async SQL search or stored synchronous SQL search.
If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
@@ -208,8 +208,8 @@ def get_async_status( """ .. raw:: html -Get the async SQL search status. - Get the current status of an async SQL search or a stored synchronous SQL search.
+Get the async SQL search status.
+Get the current status of an async SQL search or a stored synchronous SQL search.
`Get SQL search results. - Run an SQL request.
+Get SQL search results.
+Run an SQL request.
`Translate SQL into Elasticsearch queries. - Translate an SQL search into a search API request containing Query DSL. +
Translate SQL into Elasticsearch queries.
+Translate an SQL search into a search API request containing Query DSL.
It accepts the same request body parameters as the SQL search API, excluding cursor.
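A small sketch of the translate call (the SQL statement is illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Returns the Query DSL search request that the SQL query compiles to.
    resp = client.sql.translate(
        query="SELECT author, page_count FROM library ORDER BY page_count DESC",
        fetch_size=10,
    )
    print(resp)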
Delete a synonym rule. - Delete a synonym rule from a synonym set.
+Delete a synonym rule.
+Delete a synonym rule from a synonym set.
`Get a synonym rule. - Get a synonym rule from a synonym set.
+Get a synonym rule.
+Get a synonym rule from a synonym set.
`Get all synonym sets. - Get a summary of all defined synonym sets.
+Get all synonym sets.
+Get a summary of all defined synonym sets.
`Create or update a synonym set. - Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +
Create or update a synonym set.
+Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets.
When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
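To illustrate, a hedged sketch of creating a small synonym set (the set id and rules are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Creating or replacing the set reloads the analyzers that depend on it.
    client.synonyms.put_synonym(
        id="my-synonyms",
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi, howdy"},
            {"id": "rule-2", "synonyms": "ssd => solid state drive"},
        ],
    )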
@@ -378,8 +378,8 @@ def put_synonym_rule( """ .. raw:: html -Create or update a synonym rule. - Create or update a synonym rule in a synonym set.
+Create or update a synonym rule.
+Create or update a synonym rule in a synonym set.
If any of the synonym rules included is invalid, the API returns an error.
When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index d9fc0b385..430789094 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -121,8 +121,8 @@ def get( """ .. raw:: html -Get task information. - Get information about a task currently running in the cluster.
+Get task information.
+Get information about a task currently running in the cluster.
WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
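A minimal sketch of fetching one task (the node:id task identifier is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # A missing task produces a 404, raised as NotFoundError by the client.
    resp = client.tasks.get(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
    print(resp["completed"], resp["task"]["action"])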
@@ -185,8 +185,8 @@ def list( """ .. raw:: html -Get all tasks. - Get information about the tasks currently running on one or more nodes in the cluster.
+Get all tasks.
+Get information about the tasks currently running on one or more nodes in the cluster.
WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.
Identifying running tasks
diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index fa3218f81..5a64961c7 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -55,8 +55,8 @@ def find_field_structure( """ .. raw:: html -Find the structure of a text field. - Find the structure of a text field in an Elasticsearch index.
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.
This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.
For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.
The response from the API contains:
@@ -241,8 +241,8 @@ def find_message_structure( """ .. raw:: html -Find the structure of text messages. - Find the structure of a list of text messages. +
Find the structure of text messages.
+Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
@@ -402,7 +402,11 @@ def find_structure( delimiter: t.Optional[str] = None, ecs_compatibility: t.Optional[str] = None, explain: t.Optional[bool] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, grok_pattern: t.Optional[str] = None, has_header_row: t.Optional[bool] = None, line_merge_size_limit: t.Optional[int] = None, @@ -416,8 +420,8 @@ def find_structure( """ .. raw:: html -Find the structure of a text file. - The text file must contain data that is suitable to be ingested into Elasticsearch.
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. @@ -615,8 +619,8 @@ def test_grok_pattern( """ .. raw:: html -
Test a Grok pattern. - Test a Grok pattern on one or more lines of text. +
Test a Grok pattern.
+Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
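For example, a hedged sketch of checking two sample lines against a pattern (the pattern and lines are illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.text_structure.test_grok_pattern(
        grok_pattern="%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}",
        text=[
            "2024-01-01T10:00:00 INFO service started",
            "not a matching line",
        ],
    )
    # Each entry reports whether the line matched and what each capture grabbed.
    for match in resp["matches"]:
        print(match["matched"], match.get("fields", {}))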
diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index c8c8b0d8b..8e239f6a0 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -104,8 +104,8 @@ def get_transform( """ .. raw:: html -Get transforms. - Get configuration information for transforms.
+Get transforms.
+Get configuration information for transforms.
`Preview a transform. - Generates a preview of the results that you will get when you create a transform with the same configuration.
+Preview a transform.
+Generates a preview of the results that you will get when you create a transform with the same configuration.
It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
@@ -386,8 +386,8 @@ def put_transform( """ .. raw:: html -Create a transform. - Creates a transform.
+Create a transform.
+Creates a transform.
A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity.
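As a rough sketch of the entity-centric pivot described above (index and field names are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # One destination row per customer_id, keyed by a hash of the entity.
    client.transform.put_transform(
        transform_id="customer-totals",
        source={"index": ["orders"]},
        dest={"index": "customer-totals"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "order_total"}}},
        },
        frequency="5m",
    )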
@@ -616,8 +616,8 @@ def set_upgrade_mode( """ .. raw:: html -Set upgrade_mode for transform indices. - Sets a cluster wide upgrade_mode setting that prepares transform +
Set upgrade_mode for transform indices.
+Sets a cluster wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, @@ -749,8 +749,8 @@ def stop_transform( """ .. raw:: html -
Stop transforms. - Stops one or more transforms.
+Stop transforms.
+Stops one or more transforms.
`Update a transform. - Updates certain properties of a transform.
+Update a transform.
+Updates certain properties of a transform.
All updated properties except description take effect only after the transform starts the next checkpoint,
thus ensuring data consistency in each checkpoint. To use this API, you must have read and view_index_metadata
privileges for the source indices. You must also have index and read privileges for the destination index. When
diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py
index d14f8481d..1e3227261 100644
--- a/elasticsearch/_sync/client/watcher.py
+++ b/elasticsearch/_sync/client/watcher.py
@@ -39,8 +39,8 @@ def ack_watch(
"""
.. raw:: html
-
Acknowledge a watch. - Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
The acknowledgement state of an action is stored in the status.actions.<id>.ack.state structure.
IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.</p>
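A short sketch of acknowledging a single action (the watch and action ids are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Throttle just the "email_admin" action until its condition resets.
    client.watcher.ack_watch(watch_id="my_watch", action_id="email_admin")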
@@ -101,8 +101,8 @@ def activate_watch( """ .. raw:: html -Activate a watch. - A watch can be either active or inactive.
+Activate a watch.
+A watch can be either active or inactive.
`Deactivate a watch. - A watch can be either active or inactive.
+Deactivate a watch.
+A watch can be either active or inactive.
`Delete a watch.
- When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.
Delete a watch.
+When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.
Deleting a watch does not delete any watch execution records related to this watch from the watch history.
IMPORTANT: Deleting a watch must be done by using only this API.
Do not delete the watch directly from the .watches index using the Elasticsearch delete document API
@@ -266,8 +266,8 @@ def execute_watch(
"""
.. raw:: html
-
Run a watch. - This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
+Run a watch.
+This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
For testing and debugging purposes, you also have fine-grained control over how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.</p>
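Putting those knobs together, a hedged debugging sketch (the watch id is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Force-run the watch: skip its condition, simulate every action, and
    # keep this run out of the watch history.
    resp = client.watcher.execute_watch(
        id="my_watch",
        ignore_condition=True,
        action_modes={"_all": "simulate"},
        record_execution=False,
    )
    print(resp["watch_record"]["state"])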
@@ -362,8 +362,8 @@ def get_settings( """ .. raw:: html -Get Watcher index settings.
- Get settings for the Watcher internal index (.watches).
+
Get Watcher index settings.
+Get settings for the Watcher internal index (.watches).
Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.
Create or update a watch.
- When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
+
Create or update a watch.
+When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
Typically for the schedule trigger, the scheduler is the trigger engine.
IMPORTANT: You must use Kibana or this API to create a watch.
Do not add a watch directly to the .watches index by using the Elasticsearch index API.
@@ -591,8 +591,8 @@ def query_watches(
"""
.. raw:: html
-
Query watches. - Get all registered watches in a paginated manner and optionally filter watches by a query.
+Query watches.
+Get all registered watches in a paginated manner and optionally filter watches by a query.
Note that only the _id and metadata.* fields are queryable or sortable.
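A brief sketch of paging through watches by metadata (the metadata field is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Only _id and metadata.* may appear in the query and sort clauses.
    resp = client.watcher.query_watches(
        query={"term": {"metadata.team": "ops"}},
        size=10,
        sort=[{"_id": "asc"}],
    )
    print(resp["count"])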
Start the watch service. - Start the Watcher service if it is not already running.
+Start the watch service.
+Start the Watcher service if it is not already running.
`Get Watcher statistics. - This API always returns basic metrics. +
Get Watcher statistics.
+This API always returns basic metrics. You can retrieve more metrics by using the metric parameter.</p>
@@ -784,8 +784,8 @@ def stop( """ .. raw:: html -Stop the watch service. - Stop the Watcher service if it is running.
+Stop the watch service.
+Stop the Watcher service if it is running.
`Update Watcher index settings.
- Update settings for the Watcher internal index (.watches).
+
Update Watcher index settings.
+Update settings for the Watcher internal index (.watches).
Only a subset of settings can be modified.
This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*,
index.routing.allocation.include.* and index.routing.allocation.require.*.
diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py
index b44cd0909..99721e5f6 100644
--- a/elasticsearch/_sync/client/xpack.py
+++ b/elasticsearch/_sync/client/xpack.py
@@ -45,8 +45,8 @@ def info(
"""
.. raw:: html
-
Get information. - The information provided by the API includes:
+Get information.
+The information provided by the API includes:
Get usage information. - Get information about the features that are currently enabled and available under the current license. +
Get usage information.
+Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index 60fcc064b..a186a8be6 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -16,4 +16,4 @@ # under the License. __versionstr__ = "9.1.2" -__es_specification_commit__ = "cc623e3b52dd3dfd85848ee992713d37da020bfb" +__es_specification_commit__ = "406b584e362f09881784e907185d0ff084c409cf" diff --git a/elasticsearch/dsl/aggs.py b/elasticsearch/dsl/aggs.py index 2a6b2ff91..439955c98 100644 --- a/elasticsearch/dsl/aggs.py +++ b/elasticsearch/dsl/aggs.py @@ -1495,7 +1495,7 @@ def __init__( "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, - precision: Union[float, str, "DefaultType"] = DEFAULT, + precision: Union[int, str, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, @@ -1579,7 +1579,7 @@ def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, - precision: Union[float, "DefaultType"] = DEFAULT, + precision: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, bounds: Union[ @@ -2680,7 +2680,7 @@ def __init__( self, *, keyed: Union[bool, "DefaultType"] = DEFAULT, - percents: Union[Sequence[float], "DefaultType"] = DEFAULT, + percents: Union[float, Sequence[float], "DefaultType"] = DEFAULT, hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT, tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index 3b5075287..c1e309d8c 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -3866,9 +3866,6 @@ class SemanticText(Field): by using the Update mapping API. Use the Create inference API to create the endpoint. If not specified, the inference endpoint defined by inference_id will be used at both index and query time. - :arg index_options: Settings for index_options that override any - defaults used by semantic_text, for example specific quantization - settings. :arg chunking_settings: Settings for chunking text into smaller passages. If specified, these will override the chunking settings sent in the inference endpoint associated with inference_id. If @@ -3888,11 +3885,8 @@ def __init__( meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, search_inference_id: Union[str, "DefaultType"] = DEFAULT, - index_options: Union[ - "types.SemanticTextIndexOptions", Dict[str, Any], "DefaultType" - ] = DEFAULT, chunking_settings: Union[ - "types.ChunkingSettings", None, Dict[str, Any], "DefaultType" + "types.ChunkingSettings", Dict[str, Any], "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, **kwargs: Any, @@ -3903,8 +3897,6 @@ def __init__( kwargs["inference_id"] = inference_id if search_inference_id is not DEFAULT: kwargs["search_inference_id"] = search_inference_id - if index_options is not DEFAULT: - kwargs["index_options"] = index_options if chunking_settings is not DEFAULT: kwargs["chunking_settings"] = chunking_settings if fields is not DEFAULT: diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index 927af6ad4..0a2cef032 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -1079,8 +1079,6 @@ class Knn(Query): a query_vector_builder or query_vector, but not both. 
:arg num_candidates: The number of nearest neighbor candidates to consider per shard - :arg visit_percentage: The percentage of vectors to explore per shard - while doing knn search with bbq_disk :arg k: The final number of nearest neighbors to return as top hits :arg filter: Filters for the kNN search query :arg similarity: The minimum similarity for a vector to be considered @@ -1109,7 +1107,6 @@ def __init__( "types.QueryVectorBuilder", Dict[str, Any], "DefaultType" ] = DEFAULT, num_candidates: Union[int, "DefaultType"] = DEFAULT, - visit_percentage: Union[float, "DefaultType"] = DEFAULT, k: Union[int, "DefaultType"] = DEFAULT, filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, similarity: Union[float, "DefaultType"] = DEFAULT, @@ -1125,7 +1122,6 @@ def __init__( query_vector=query_vector, query_vector_builder=query_vector_builder, num_candidates=num_candidates, - visit_percentage=visit_percentage, k=k, filter=filter, similarity=similarity, @@ -1437,7 +1433,7 @@ def __init__( ] = DEFAULT, version: Union[int, "DefaultType"] = DEFAULT, version_type: Union[ - Literal["internal", "external", "external_gte"], "DefaultType" + Literal["internal", "external", "external_gte", "force"], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index b62fad025..5d518c9d2 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -151,10 +151,9 @@ class ChunkingSettings(AttrDict[Any]): strategies in the linked documentation. Defaults to `sentence` if omitted. :arg max_chunk_size: (required) The maximum size of a chunk in words. - This value cannot be lower than `20` (for `sentence` strategy) or - `10` (for `word` strategy). This value should not exceed the - window size for the associated model. Defaults to `250` if - omitted. + This value cannot be higher than `300` or lower than `20` (for + `sentence` strategy) or `10` (for `word` strategy). Defaults to + `250` if omitted. :arg separator_group: Only applicable to the `recursive` strategy and required when using it. Sets a predefined list of separators in the saved chunking settings based on the selected text type. @@ -398,17 +397,14 @@ class DenseVectorIndexOptions(AttrDict[Any]): HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. Defaults to `16` if omitted. :arg rescore_vector: The rescore vector options. This is only - applicable to `bbq_disk`, `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, - `bbq_flat`, `int4_flat`, and `int8_flat` index types. - :arg on_disk_rescore: `true` if vector rescoring should be done on- - disk Only applicable to `bbq_hnsw` + applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, + `int4_flat`, and `int8_flat` index types. 
""" type: Union[ Literal[ "bbq_flat", "bbq_hnsw", - "bbq_disk", "flat", "hnsw", "int4_flat", @@ -424,7 +420,6 @@ class DenseVectorIndexOptions(AttrDict[Any]): rescore_vector: Union[ "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType ] - on_disk_rescore: Union[bool, DefaultType] def __init__( self, @@ -433,7 +428,6 @@ def __init__( Literal[ "bbq_flat", "bbq_hnsw", - "bbq_disk", "flat", "hnsw", "int4_flat", @@ -449,7 +443,6 @@ def __init__( rescore_vector: Union[ "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType ] = DEFAULT, - on_disk_rescore: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if type is not DEFAULT: @@ -462,8 +455,6 @@ def __init__( kwargs["m"] = m if rescore_vector is not DEFAULT: kwargs["rescore_vector"] = rescore_vector - if on_disk_rescore is not DEFAULT: - kwargs["on_disk_rescore"] = on_disk_rescore super().__init__(kwargs) @@ -2335,7 +2326,9 @@ class LikeDocument(AttrDict[Any]): per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType] routing: Union[str, DefaultType] version: Union[int, DefaultType] - version_type: Union[Literal["internal", "external", "external_gte"], DefaultType] + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], DefaultType + ] def __init__( self, @@ -2350,7 +2343,7 @@ def __init__( routing: Union[str, DefaultType] = DEFAULT, version: Union[int, DefaultType] = DEFAULT, version_type: Union[ - Literal["internal", "external", "external_gte"], DefaultType + Literal["internal", "external", "external_gte", "force"], DefaultType ] = DEFAULT, **kwargs: Any, ): @@ -3196,33 +3189,6 @@ def __init__( super().__init__(kwargs) -class SemanticTextIndexOptions(AttrDict[Any]): - """ - :arg dense_vector: - :arg sparse_vector: - """ - - dense_vector: Union["DenseVectorIndexOptions", Dict[str, Any], DefaultType] - sparse_vector: Union["SparseVectorIndexOptions", Dict[str, Any], DefaultType] - - def __init__( - self, - *, - dense_vector: Union[ - "DenseVectorIndexOptions", Dict[str, Any], DefaultType - ] = DEFAULT, - sparse_vector: Union[ - "SparseVectorIndexOptions", Dict[str, Any], DefaultType - ] = DEFAULT, - **kwargs: Any, - ): - if dense_vector is not DEFAULT: - kwargs["dense_vector"] = dense_vector - if sparse_vector is not DEFAULT: - kwargs["sparse_vector"] = sparse_vector - super().__init__(kwargs) - - class ShapeFieldQuery(AttrDict[Any]): """ :arg indexed_shape: Queries using a pre-indexed shape.