diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index d4ca7cce0..a08db3d98 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -626,12 +626,14 @@ async def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -651,6 +653,8 @@ async def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -661,6 +665,8 @@ async def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. @@ -694,6 +700,8 @@ async def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -702,6 +710,8 @@ async def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: @@ -2274,7 +2284,26 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. 
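The two bulk flags added above (`list_executed_pipelines` and `require_data_stream`) are plain query parameters on the existing method; a minimal sketch of passing them from the async client (the endpoint URL and data stream name are illustrative assumptions, not part of this change):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main():
        client = AsyncElasticsearch("http://localhost:9200")
        resp = await client.bulk(
            operations=[
                {"create": {"_index": "logs-app-default"}},
                {"@timestamp": "2024-05-01T00:00:00Z", "message": "service started"},
            ],
            list_executed_pipelines=True,   # report which ingest pipelines ran per action
            require_data_stream=True,       # reject actions that do not target a data stream
        )
        print(resp["errors"])
        await client.close()

    asyncio.run(main())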
Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index 38aa2d968..c843f7b8e 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -36,7 +36,8 @@ async def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. ``_ @@ -107,7 +108,10 @@ async def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ @@ -198,8 +202,10 @@ async def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -240,8 +246,9 @@ async def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -294,7 +301,23 @@ async def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. 
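The create-follower behaviour described above takes the leader index and remote cluster alias in the request body; a hedged sketch, run inside an async function with `client` an AsyncElasticsearch instance as in the earlier snippet (index names and the cluster alias are assumptions):

    resp = await client.ccr.follow(
        index="follower-logs",          # new follower index on the local cluster
        leader_index="leader-logs",     # existing index on the remote cluster
        remote_cluster="leader",        # remote cluster alias from the cluster settings
        wait_for_active_shards="1",
    )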
+ While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -350,8 +373,7 @@ async def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -395,7 +417,14 @@ async def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -436,8 +465,10 @@ async def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. ``_ @@ -512,9 +543,14 @@ async def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. 
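The auto-follow pattern semantics described above correspond to `ccr.put_auto_follow_pattern`; a sketch with assumed names, inside an async context:

    await client.ccr.put_auto_follow_pattern(
        name="logs-pattern",
        remote_cluster="leader",
        leader_index_patterns=["logs-*"],
        follow_index_pattern="{{leader_index}}-follower",  # template for local follower names
    )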
``_ @@ -638,7 +674,11 @@ async def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. ``_ @@ -703,7 +743,11 @@ async def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -785,7 +829,8 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -821,8 +866,13 @@ async def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. ``_ diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index ec80623c1..36cb92624 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -44,7 +44,13 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. 
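For the allocation-explain API described above, a typical diagnostic request names one shard copy; a minimal sketch (the index name is an assumption), inside an async context:

    resp = await client.cluster.allocation_explain(
        index="my-index",
        shard=0,
        primary=False,
        include_yes_decisions=False,  # keep the response focused on blocking deciders
    )
    # resp explains why this shard copy is unassigned, or why it stays on its current node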
``_ @@ -331,8 +338,8 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. 
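The "wait until the cluster reaches a certain health level" benefit mentioned in the cluster health docstring above is driven by request parameters; a small sketch, inside an async context:

    resp = await client.cluster.health(
        wait_for_status="yellow",  # block until the cluster is at least yellow or the timeout elapses
        timeout="30s",
    )
    print(resp["status"], resp["number_of_nodes"])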
Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. 
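The voting-exclusions workflow described above is a two-step process from the client; a sketch with assumed node names, inside an async context:

    # Exclude the nodes that are about to be decommissioned from the voting configuration.
    await client.cluster.post_voting_config_exclusions(node_names=["node-1", "node-2"])
    # ...shut those nodes down, then clear the exclusions once they have left the cluster.
    await client.cluster.delete_voting_config_exclusions(wait_for_removal=True)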
The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
@@ -975,7 +1048,26 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index 09b98ecee..750fff034 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -35,8 +35,17 @@ async def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. 
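The `?local=true` variant mentioned in the cluster state docstring above maps to the `local` parameter, and filtering by metric keeps the response small; a sketch, inside an async context:

    resp = await client.cluster.state(
        metric=["version", "master_node"],
        local=True,  # read the state held by the node handling the request, not the elected master
    )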
The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ async def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. ``_ """ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 96c85ae96..643db3aa9 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -38,9 +38,9 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ async def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. ``_ @@ -157,7 +159,7 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. 
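The explain-lifecycle API described above is the usual way to check why an index is stuck in its policy; a sketch (the index name is an assumption), inside an async context:

    resp = await client.ilm.explain_lifecycle(index="logs-000001", only_errors=True)
    for name, info in resp["indices"].items():
        print(name, info.get("step"), info.get("failed_step"))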
``_ """ @@ -249,10 +251,18 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ async def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ async def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. 
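The move-to-step request described above must name both the step the index is currently on and the target; a hedged sketch mirroring the shape of the documented example (index name and step values are assumptions), inside an async context:

    await client.ilm.move_to_step(
        index="my-index-000001",
        current_step={"phase": "new", "action": "complete", "name": "complete"},
        next_step={"phase": "warm"},  # phase only: move to the first step of its first action
    )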
Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ async def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index f4326c65d..84b8009bb 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -245,8 +245,8 @@ async def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. ``_ @@ -331,7 +331,26 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. 
+ If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ async def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ async def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ async def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. 
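Following the clone requirements above, the source index is usually given a write block before cloning; a sketch with assumed index names, inside an async context:

    await client.indices.add_block(index="my-source-index", block="write")
    await client.indices.clone(
        index="my-source-index",
        target="my-cloned-index",
        settings={"index.number_of_replicas": 0},  # replica count is not copied; set it explicitly
    )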
The source index must be read only (`index.blocks.write: true`). ``_ @@ -1456,8 +1500,8 @@ async def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1523,7 +1567,10 @@ async def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1611,7 +1658,22 @@ async def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1694,7 +1756,21 @@ async def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. 
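Per the read-only recommendation in the force merge warning above, a typical force merge compacts an index that no longer receives writes down to a single segment; a sketch (the index name is an assumption), inside an async context:

    await client.indices.forcemerge(
        index="logs-2024.04",       # no longer receiving writes
        max_num_segments=1,
        wait_for_completion=False,  # return immediately and let the merge run in the background
    )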
But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2679,8 +2755,18 @@ async def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. ``_ @@ -3343,7 +3429,16 @@ async def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3423,9 +3518,25 @@ async def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. 
* When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3559,7 +3670,21 @@ async def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. ``_ @@ -3623,9 +3748,20 @@ async def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. + You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. 
* Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3877,8 +4013,9 @@ async def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3957,8 +4094,14 @@ async def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. ``_ @@ -4029,7 +4172,39 @@ async def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. * A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. For example an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. 
Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. The source + index must have more primary shards than the target index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4314,7 +4489,27 @@ async def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. * Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened. IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4406,8 +4601,14 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. + Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. 
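The split requirements above reduce to a single call once the source index carries a write block; a sketch with assumed names, inside an async context:

    await client.indices.split(
        index="my-index",            # source index, assumed to already have a write block
        target="my-split-index",
        settings={"index.number_of_shards": 6},  # must be a multiple of the source's primary shard count
    )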
To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4510,7 +4711,8 @@ async def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 0f4a209cd..48b79050e 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -35,7 +35,9 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ async def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ async def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ async def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. 
If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ async def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index dd1befecf..7f7308aeb 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -2488,6 +2488,7 @@ async def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ async def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: parameter is deprecated! Use [include=definition] + instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. 
When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ async def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -3169,9 +3174,11 @@ async def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) async def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ async def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ async def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ async def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ async def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ async def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ async def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
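A hedged sketch of the two machine learning changes above: preferring include="definition" over the now-deprecated include_model_definition flag, and the new aggs body alias on put_datafeed. The model, job, datafeed, and index names are made up, and the aggregation body only illustrates the general shape a datafeed aggregation can take.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Prefer include="definition"; include_model_definition is deprecated.
models = client.ml.get_trained_models(model_id="my-model", include="definition")

# `aggs` is now accepted as an alias for `aggregations` in the datafeed body.
client.ml.put_datafeed(
    datafeed_id="datafeed-my-job",
    job_id="my-job",  # the anomaly detection job is assumed to exist already
    indices=["metrics-*"],
    aggs={
        "buckets": {
            "date_histogram": {"field": "@timestamp", "fixed_interval": "5m"},
            "aggregations": {
                "@timestamp": {"max": {"field": "@timestamp"}},
                "bytes_avg": {"avg": {"field": "bytes"}},
            },
        }
    },
)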
@@ -3473,6 +3488,8 @@ async def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ async def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ async def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ async def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ async def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ async def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 21ca60964..6e9006333 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. 
+ When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 48b96a052..a175037eb 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -624,12 +624,14 @@ def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -649,6 +651,8 @@ def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -659,6 +663,8 @@ def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. 
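A minimal, hedged example of the two new bulk flags documented above (list_executed_pipelines and require_data_stream). The data stream name and connection details are placeholders; note that data streams only accept create actions.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

resp = client.bulk(
    index="logs-app-default",       # assumed to be an existing or auto-created data stream
    require_data_stream=True,       # reject actions that do not target a data stream
    list_executed_pipelines=True,   # report the ingest pipelines that ran for each action
    operations=[
        {"create": {}},
        {"@timestamp": "2024-01-01T00:00:00Z", "message": "hello"},
        {"create": {}},
        {"@timestamp": "2024-01-01T00:00:01Z", "message": "world"},
    ],
)
for item in resp["items"]:
    print(item["create"])  # includes the executed pipelines when the flag is set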
@@ -692,6 +698,8 @@ def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -700,6 +708,8 @@ def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: @@ -2272,7 +2282,26 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index dbd1e1373..aeca44c29 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -36,7 +36,8 @@ def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. ``_ @@ -107,7 +108,10 @@ def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ @@ -198,8 +202,10 @@ def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. 
Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -240,8 +246,9 @@ def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -294,7 +301,23 @@ def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -350,8 +373,7 @@ def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -395,7 +417,14 @@ def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -436,8 +465,10 @@ def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. 
Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. ``_ @@ -512,9 +543,14 @@ def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. ``_ @@ -638,7 +674,11 @@ def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. ``_ @@ -703,7 +743,11 @@ def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -785,7 +829,8 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -821,8 +866,13 @@ def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. 
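To tie the follower lifecycle described above together, a short sketch against a hypothetical follower cluster: pause and resume replication, then convert the follower back into a regular index by pausing, closing, unfollowing, and reopening it. The index and endpoint names are invented.

from elasticsearch import Elasticsearch

follower = Elasticsearch("https://follower.example.com:9200", api_key="...")  # placeholder

# Pause and later resume replication, e.g. to change the following task's settings.
follower.ccr.pause_follow(index="logs-follower")
follower.ccr.resume_follow(index="logs-follower")

# Permanently convert the follower into a regular index (irreversible).
follower.ccr.pause_follow(index="logs-follower")
follower.indices.close(index="logs-follower")
follower.ccr.unfollow(index="logs-follower")
follower.indices.open(index="logs-follower")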
``_ diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 135e92cbe..d52b243bc 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -44,7 +44,13 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. 
These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. 
``_ @@ -787,7 +821,26 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. 
The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. 
Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index d60194583..c74e73780 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -35,8 +35,17 @@ def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. ``_ """ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 8c98bb4e5..2c846c6d4 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -38,9 +38,9 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. 
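A brief, hedged sketch of the features and ILM calls documented above. The policy name and endpoint are placeholders, and reset_features is left commented out because, as the docstring warns, it is intended for development and testing clusters only.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# List the feature states that a snapshot with global state would include.
for feature in client.features.get_features()["features"]:
    print(feature["name"], "-", feature["description"])

# Development/testing only: wipe the state of every feature (system indices).
# client.features.reset_features()

# Delete an ILM policy that is no longer managing any index.
client.ilm.delete_lifecycle(name="old-logs-policy")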
``_ @@ -93,9 +93,11 @@ def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. ``_ @@ -157,7 +159,7 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. 
When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 4b9988368..30c8644d0 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -245,8 +245,8 @@ def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. 
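A hedged sketch of the operational ILM calls described above: stop and restart the plugin around maintenance, retry an index stuck in the ERROR step, and, at an expert level, move an index to a specific step. The index name, step names, and phases are illustrative only.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Stop ILM before maintenance, confirm the operation mode, then start it again.
client.ilm.stop()
print(client.ilm.get_status())  # poll until operation_mode reports STOPPED
client.ilm.start()

# Re-run the policy for an index that is stuck in the ERROR step.
client.ilm.retry(index="logs-2024")

# Expert level and potentially destructive: both steps must be spelled out.
client.ilm.move_to_step(
    index="logs-2024",
    current_step={"phase": "hot", "action": "rollover", "name": "check-rollover-ready"},
    next_step={"phase": "warm"},  # phase only: first step of the first action in "warm"
)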
``_ @@ -331,7 +331,26 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. + If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. 
Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. The source index must be read only (`index.blocks.write: true`). ``_ @@ -1456,8 +1500,8 @@ def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1523,7 +1567,10 @@ def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1611,7 +1658,22 @@ def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. 
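As a concrete illustration of the downsampling requirements above, a rough sketch assuming the same configured client named `client`; the backing-index and target names are placeholders, and the source must already belong to a time series data stream:

# Block writes, then roll the raw samples up into hourly summaries.
client.indices.add_block(index=".ds-metrics-cpu-2024.01.01-000001", block="write")
client.indices.downsample(
    index=".ds-metrics-cpu-2024.01.01-000001",
    target_index="metrics-cpu-downsampled-1h",
    config={"fixed_interval": "1h"},
)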
If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1694,7 +1756,21 @@ def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2679,8 +2755,18 @@ def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. ``_ @@ -3343,7 +3429,16 @@ def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. 
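Before the template notes continue, here is the shape of a typical force merge call for the read-only case recommended above; a sketch only, with a placeholder index name and the assumed configured client named `client`:

# Merge a rolled-over, no-longer-written index down to a single segment.
client.indices.forcemerge(
    index="my-index-000001",
    max_num_segments=1,
    wait_for_completion=True,
)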
Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3423,9 +3518,25 @@ def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. * When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3559,7 +3670,21 @@ def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. 
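A minimal sketch of the reload-then-clear-cache sequence suggested above, assuming the configured synchronous client named `client` and an illustrative index whose search analyzer uses an updateable synonym filter:

# Pick up edited synonym files, then drop cached responses built with the old analyzer.
client.indices.reload_search_analyzers(index="my-index")
client.indices.clear_cache(index="my-index", request=True)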
``_ @@ -3623,9 +3748,20 @@ def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. + You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3877,8 +4013,9 @@ def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3957,8 +4094,14 @@ def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. ``_ @@ -4029,7 +4172,39 @@ def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. * A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. 
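As an aside before the shrink description continues, the resolve-cluster check described above might look roughly like this from the synchronous client; the cluster alias and index patterns are illustrative:

resp = client.indices.resolve_cluster(name="my-index-*,cluster_two:logs-*")
for cluster, details in resp.items():
    # connection state, and whether anything on that cluster matches the expression
    print(cluster, details.get("connected"), details.get("matching_indices"))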
For example, an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard. + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Shards are recovered according to the `index.routing.allocation.initial_recovery._id` + index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4314,7 +4489,27 @@ def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened.
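To make the shrink preparation steps above concrete, a rough sketch; the node and index names are placeholders and `client` is the assumed configured synchronous client:

# Move one copy of every shard to a single node and block writes...
client.indices.put_settings(
    index="my-source-index",
    settings={
        "index.routing.allocation.require._name": "shrink-node-1",
        "index.blocks.write": True,
    },
)
# ...then shrink into an index with a single primary shard.
client.indices.shrink(
    index="my-source-index",
    target="my-shrunk-index",
    settings={"index.number_of_shards": 1, "index.number_of_replicas": 1},
)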
IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4406,8 +4601,14 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. + Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4510,7 +4711,8 @@ def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index dc64acd9e..b1f97a9fe 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -35,7 +35,9 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. 
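Before the licensing description continues, the shard-level statistics call mentioned above could look like this; a sketch with a placeholder data stream name and the assumed configured client named `client`:

resp = client.indices.stats(index="my-data-stream", level="shards")
print(resp["_all"]["primaries"]["docs"])  # primary shards only
print(resp["_all"]["total"]["docs"])      # primaries plus replicas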
You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 2146426ec..addf8e994 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -2488,6 +2488,7 @@ def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: parameter is deprecated! Use [include=definition] + instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. 
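Since `include_model_definition` is deprecated in favor of `include`, a request for a model definition from the Python client would now look roughly like this; the model ID is a placeholder and `client` is the assumed configured client:

models = client.ml.get_trained_models(
    model_id="my-trained-model",
    include="definition",  # preferred over include_model_definition=True
)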
When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -3169,9 +3174,11 @@ def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
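The new `meta` parameter on put_data_frame_analytics (serialized as `_meta` in the request body) can be exercised roughly as follows; this is a sketch with placeholder index and job names, assuming the configured client named `client`:

client.ml.put_data_frame_analytics(
    id="my-outlier-job",
    source={"index": "my-source-index"},
    dest={"index": "my-dest-index"},
    analysis={"outlier_detection": {}},
    meta={"created_by": "example-pipeline"},  # stored under `_meta` in the job config
)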
@@ -3473,6 +3488,8 @@ def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
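Before the model plot description continues, a sketch of how the new indices options on `put_job` might be used when the request also embeds a datafeed; the job name, field names, and index pattern are placeholders, and `client` is the assumed configured client:

client.ml.put_job(
    job_id="my-anomaly-job",
    analysis_config={
        "bucket_span": "15m",
        "detectors": [{"function": "mean", "field_name": "cpu_usage"}],
    },
    data_description={"time_field": "@timestamp"},
    datafeed_config={"indices": ["metrics-*"]},
    # new query parameters: control how the datafeed's index patterns resolve
    ignore_unavailable=True,
    expand_wildcards="open",
)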
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 3a4b4a6c5..61f2d54c3 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. 
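Before the secure-settings description continues, the node introspection calls above can be sketched as follows, assuming the same configured client named `client`; the metric selection is illustrative:

print(client.nodes.hot_threads())  # plain-text breakdown of hot threads per node
info = client.nodes.info(node_id="_local", metric="os,process")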
+ When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_
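Finally, a sketch of the remaining node-level calls, assuming the configured synchronous client named `client`; the keystore password and metric names below are placeholders:

# Reload reloadable secure settings on every node (same keystore password cluster-wide).
client.nodes.reload_secure_settings(secure_settings_password="my-keystore-password")

# Node statistics restricted to selected metrics, and per-node feature usage.
stats = client.nodes.stats(metric="jvm,fs")
usage = client.nodes.usage(metric="rest_actions")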