diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index ec9596dee..802ec316f 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -626,12 +626,14 @@ async def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -651,6 +653,8 @@ async def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -661,6 +665,8 @@ async def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. @@ -694,6 +700,8 @@ async def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -702,6 +710,8 @@ async def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: @@ -2274,7 +2284,26 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. 
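For illustration, the two new bulk query parameters added above might be exercised like this (a minimal async sketch; the endpoint URL, index name, and document values are made up):

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def bulk_into_data_stream() -> None:
        # list_executed_pipelines echoes back the ingest pipelines that ran for
        # each index/create action; require_data_stream rejects actions that do
        # not target a data stream (existing or to-be-created).
        resp = await client.bulk(
            operations=[
                {"create": {"_index": "logs-myapp-default"}},
                {"@timestamp": "2024-01-01T00:00:00Z", "message": "hello"},
            ],
            list_executed_pipelines=True,
            require_data_stream=True,
        )
        print(resp["errors"])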
Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index c3f5ec8dc..03c17de2d 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -44,7 +44,13 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. 
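As a usage sketch of the rewritten allocation-explain API (the index name is illustrative; the `client` instance from the earlier bulk sketch is assumed):

    async def explain_unassigned_shard() -> None:
        # Ask why primary shard 0 of a given index is unassigned, or why it
        # stays on its current node instead of rebalancing.
        resp = await client.cluster.allocation_explain(
            index="my-index",
            shard=0,
            primary=True,
        )
        print(resp)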
Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. 
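The "wait until the cluster reaches a certain health level" behaviour mentioned above might look like this in client code (a sketch reusing the same `client`; the timeout value is arbitrary):

    async def wait_for_yellow() -> None:
        # Blocks (up to the timeout) until the cluster reports at least yellow.
        health = await client.cluster.health(wait_for_status="yellow", timeout="30s")
        print(health["status"], health["timed_out"])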
A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. 
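A hedged sketch of the POST-then-DELETE voting-exclusions workflow described above (the node name is made up; `client` as before):

    async def decommission_master_node() -> None:
        # Exclude the departing master-eligible node from the voting configuration.
        await client.cluster.post_voting_config_exclusions(node_names="node-1")
        # ... shut the node down, then clear the exclusion list ...
        await client.cluster.delete_voting_config_exclusions()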
It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. 
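For example, the reroute commands and flags documented above could be combined as follows (a sketch; index and node names are invented):

    async def move_and_retry() -> None:
        # Simulate a move first: dry_run returns the resulting state without
        # applying it, and explain includes the decision reasoning.
        await client.cluster.reroute(
            commands=[
                {
                    "move": {
                        "index": "my-index",
                        "shard": 0,
                        "from_node": "node1",
                        "to_node": "node2",
                    }
                }
            ],
            dry_run=True,
            explain=True,
        )
        # Retry shards whose allocation failed `index.allocation.max_retries` times.
        await client.cluster.reroute(retry_failed=True)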
WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 66df40190..009706e41 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -38,9 +38,9 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ async def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. ``_ @@ -157,7 +159,7 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. 
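The ILM inspection APIs covered in this file might be used together like this (a minimal sketch; the index name is illustrative and `client` is assumed from earlier):

    async def inspect_ilm() -> None:
        explain = await client.ilm.explain_lifecycle(index="my-index")
        status = await client.ilm.get_status()
        print(status["operation_mode"], list(explain["indices"]))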
Switch the indices, ILM policies, and legacy,
+        composable, and component templates from using custom node attributes and attribute-based
+        allocation filters to using data tiers. Optionally, delete one legacy index template.
+        Using node roles enables ILM to automatically move the indices between data tiers.
+        Migrating away from custom node attributes routing can be manually performed.
+        This API provides an automated way of performing three out of the four manual
+        steps listed in the migration guide: 1. Stop setting the custom hot attribute
+        on new indices. 2. Remove custom allocation settings from existing ILM policies.
+        3. Replace custom allocation settings from existing indices with the corresponding
+        tier preference. ILM must be stopped before performing the migration. Use the
+        stop ILM and get ILM status APIs to wait until the reported operation mode is
+        `STOPPED`.

        ``_

@@ -312,7 +322,21 @@ async def move_to_step(
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
-        Manually moves an index into the specified step and executes that step.
+        Move to a lifecycle step. Manually move an index into a specific step in the
+        lifecycle policy and run that step. WARNING: This operation can result in the
+        loss of data. Manually moving an index into a specific step runs that step even
+        if it has already been performed. This is a potentially destructive action and
+        should be considered an expert-level API. You must specify both the current
+        step and the step to be executed in the body of the request. The request will
+        fail if the current step does not match the step currently running for the index.
+        This is to prevent the index from being moved from an unexpected step into the
+        next step. When specifying the target (`next_step`) to which the index will be
+        moved, either the name or both the action and name fields are optional. If only
+        the phase is specified, the index will move to the first step of the first action
+        in the target phase. If the phase and action are specified, the index will move
+        to the first step of the specified action in the specified phase. Only actions
+        specified in the ILM policy are considered valid. An index cannot move to a step
+        that is not part of its policy.

        ``_

@@ -375,8 +399,9 @@ async def put_lifecycle(
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
-        Creates a lifecycle policy. If the specified policy exists, the policy is replaced
-        and the policy version is incremented.
+        Create or update a lifecycle policy. If the specified policy exists, it is replaced
+        and the policy version is incremented. NOTE: Only the latest version of the policy
+        is stored; you cannot revert to previous versions.

        ``_

@@ -435,7 +460,8 @@ async def remove_policy(
        pretty: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
-        Removes the assigned lifecycle policy and stops managing the specified index
+        Remove policies from an index. Remove the assigned lifecycle policies from an
+        index or a data stream's backing indices. It also stops managing the indices.

        ``_

@@ -475,7 +501,10 @@ async def retry(
        pretty: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
-        Retries executing the policy for an index that is in the ERROR step.
+        Retry a policy. Retry running the lifecycle policy for an index that is in the
+        ERROR step. The API sets the policy back to the step where the error occurred
+        and runs the step.
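The stop-then-migrate sequence that `migrate_to_data_tiers` requires could be sketched as follows (the polling interval and dry-run usage are illustrative; `client` as before):

    import asyncio

    async def migrate_when_stopped() -> None:
        await client.ilm.stop()
        # Wait until the reported operation mode is STOPPED, as required.
        while (await client.ilm.get_status())["operation_mode"] != "STOPPED":
            await asyncio.sleep(1)
        # dry_run validates the migration without applying any changes.
        resp = await client.ilm.migrate_to_data_tiers(dry_run=True)
        print(resp)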
Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index e7af76ecc..744d0b678 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1456,8 +1456,8 @@ async def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index ecd516365..6335461d6 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -77,6 +77,57 @@ async def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Deletes an IP location database configuration. + + ``_ + + :param id: A comma-separated list of IP location database configurations to delete + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_ingest/ip_location/database/{__path_parts["id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "DELETE",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="ingest.delete_ip_location_database",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     async def delete_pipeline(
         self,
@@ -217,6 +268,57 @@ async def get_geoip_database(
             path_parts=__path_parts,
         )

+    @_rewrite_parameters()
+    async def get_ip_location_database(
+        self,
+        *,
+        id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        Returns information about one or more IP location database configurations.
+
+        ``_
+
+        :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard
+            (`*`) expressions are supported. To get all database configurations, omit
+            this parameter or use `*`.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        """
+        __path_parts: t.Dict[str, str]
+        if id not in SKIP_IN_PATH:
+            __path_parts = {"id": _quote(id)}
+            __path = f'/_ingest/ip_location/database/{__path_parts["id"]}'
+        else:
+            __path_parts = {}
+            __path = "/_ingest/ip_location/database"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="ingest.get_ip_location_database",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     async def get_pipeline(
         self,
@@ -384,6 +486,70 @@ async def put_geoip_database(
             path_parts=__path_parts,
         )

+    @_rewrite_parameters(
+        body_name="configuration",
+    )
+    async def put_ip_location_database(
+        self,
+        *,
+        id: str,
+        configuration: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        Create or update an IP location database configuration.
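As a rough sketch of the three new IP location endpoints (the database ID is invented, and the configuration body is assumed to follow the maxmind-style schema of the geoip variant; `client` as before):

    async def manage_ip_location_db() -> None:
        await client.ingest.put_ip_location_database(
            id="my-ip-db",
            configuration={"name": "GeoIP2-City", "maxmind": {"account_id": "123456"}},
        )
        print(await client.ingest.get_ip_location_database(id="my-ip-db"))
        await client.ingest.delete_ip_location_database(id="my-ip-db")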
+
+        ``_
+
+        :param id: ID of the database configuration to create or update.
+        :param configuration:
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        if configuration is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'configuration' and 'body', one of them should be set."
+            )
+        elif configuration is not None and body is not None:
+            raise ValueError("Cannot set both 'configuration' and 'body'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_ingest/ip_location/database/{__path_parts["id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __body = configuration if configuration is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="ingest.put_ip_location_database",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         body_fields=(
             "deprecated",
diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py
index da3a23b1c..4fd9282aa 100644
--- a/elasticsearch/_async/client/ml.py
+++ b/elasticsearch/_async/client/ml.py
@@ -2488,6 +2488,7 @@ async def get_trained_models(
             ],
         ]
     ] = None,
+    include_model_definition: t.Optional[bool] = None,
     pretty: t.Optional[bool] = None,
     size: t.Optional[int] = None,
     tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -2514,6 +2515,8 @@ async def get_trained_models(
     :param from_: Skips the specified number of models.
     :param include: A comma delimited string of optional fields to include in the
         response body.
+    :param include_model_definition: This parameter is deprecated. Use `include=definition`
+        instead.
     :param size: Specifies the maximum number of models to obtain.
     :param tags: A comma delimited string of tags. A trained model can have many
         tags, or none.
When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ async def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -3169,9 +3174,11 @@ async def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) async def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ async def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ async def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ async def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ async def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ async def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ async def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
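The new `meta` parameter on data frame analytics (serialized as `_meta` in the request body, per the alias above) might be used like this (a sketch; the ID, indices, and metadata are invented):

    async def create_dfa_with_meta() -> None:
        await client.ml.put_data_frame_analytics(
            id="my-analytics",
            source={"index": "source-index"},
            dest={"index": "dest-index"},
            analysis={"outlier_detection": {}},
            meta={"owner": "data-team"},  # sent as "_meta" in the body
        )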
@@ -3473,6 +3488,8 @@ async def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ async def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ async def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ async def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ async def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
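A hedged sketch of a datafeed using the newly accepted `aggs` alias (names are invented; real datafeed aggregations have further requirements, such as a `max` aggregation on the job's time field, which the sketch includes):

    async def create_datafeed_with_aggs() -> None:
        await client.ml.put_datafeed(
            datafeed_id="datafeed-my-job",
            job_id="my-job",
            indices=["metrics-*"],
            aggs={
                "buckets": {
                    "date_histogram": {"field": "@timestamp", "fixed_interval": "5m"},
                    "aggregations": {
                        "@timestamp": {"max": {"field": "@timestamp"}},
                    },
                },
            },
        )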
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ async def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index a7b516588..5aa8aa0be 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. 
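The new index-expansion query parameters on job creation apply to the indices of the embedded datafeed; a sketch (all job configuration values are illustrative):

    async def create_job() -> None:
        await client.ml.put_job(
            job_id="my-job",
            analysis_config={
                "bucket_span": "15m",
                "detectors": [{"function": "mean", "field_name": "responsetime"}],
            },
            data_description={"time_field": "@timestamp"},
            datafeed_config={"indices": ["metrics-*"]},
            expand_wildcards="open",
            ignore_unavailable=True,
        )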
+ When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 01405a1dd..bebd41cd8 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -624,12 +624,14 @@ def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -649,6 +651,8 @@ def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -659,6 +663,8 @@ def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. 
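Reloading secure settings against a password-protected keystore might look like this (the password is a placeholder; `client` as before):

    async def reload_keystore_settings() -> None:
        resp = await client.nodes.reload_secure_settings(
            secure_settings_password="keystore-password",
        )
        print(list(resp["nodes"]))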
@@ -692,6 +698,8 @@ def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -700,6 +708,8 @@ def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: @@ -2272,7 +2282,26 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index d7f60e889..6c1afa6c7 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -44,7 +44,13 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. 
``_ @@ -331,8 +338,8 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. 
By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. 
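On the synchronous client, the persistent-versus-transient distinction described here looks like this (a sketch; the setting is just a common dynamic cluster setting, and assigning None resets it to the default):

    from elasticsearch import Elasticsearch

    sync_client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Persistent settings survive a full cluster restart.
    sync_client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
    )
    # Assigning a null value resets the setting to its default.
    sync_client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": None}
    )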
The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. 
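Before the cluster state changes below, a sketch of the reroute flow described above; the index, shard, and node names are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Simulate an explicit shard move first; dry_run returns the resulting
    # cluster state without actually applying the command.
    client.cluster.reroute(
        dry_run=True,
        commands=[
            {
                "move": {
                    "index": "my-index",
                    "shard": 0,
                    "from_node": "node-1",
                    "to_node": "node-2",
                }
            }
        ],
    )

    # After correcting the underlying problem, request one retry round for
    # shards that exhausted index.allocation.max_retries.
    client.cluster.reroute(retry_failed=True)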
+ Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 4f8196869..6ace9ee5c 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -38,9 +38,9 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. 
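Tying back to the cluster state API above: given its size and compatibility warnings, a sketch that requests only narrow slices of the state, assuming a local cluster (the explain-lifecycle description resumes below):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Fetch only the metadata for one index rather than the full state.
    client.cluster.state(metric="metadata", index="my-index")  # hypothetical index

    # Read whatever state the handling node holds, without routing to the master.
    client.cluster.state(metric="master_node", local=True)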
The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and provides information + about any failures. ``_ @@ -157,7 +159,7 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be performed manually. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 2. Remove custom allocation settings from existing ILM policies. + 3. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + it should be considered an expert-level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index. + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, the name field, or both the action and name fields, are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy.
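A hedged sketch of the move_to_step call described above; the step coordinates are illustrative and must match what the explain lifecycle API reports for the index:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Move an index from the end of the hot phase into the warm forcemerge action.
    client.ilm.move_to_step(
        index="my-index",  # hypothetical index name
        current_step={"phase": "hot", "action": "complete", "name": "complete"},
        next_step={"phase": "warm", "action": "forcemerge", "name": "forcemerge"},
    )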
If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored; you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index b27909af1..7a29fe83f 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1456,8 +1456,8 @@ def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream's current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
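Combining the explain and retry APIs above into the workflow the retry docstring suggests; the index pattern is hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Find indices stuck in the ERROR step, then rerun the failed step for each.
    explain = client.ilm.explain_lifecycle(index="my-index-*", only_errors=True)
    for index_name in explain["indices"]:
        client.ilm.retry(index=index_name)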
diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index db211c1c3..a716ce45c 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -77,6 +77,57 @@ def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Deletes an IP location database configuration. + + ``_ + + :param id: A comma-separated list of IP location database configurations to delete + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete_pipeline( self, @@ -217,6 +268,57 @@ def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
+ """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_pipeline( self, @@ -384,6 +486,70 @@ def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Returns information about one or more IP location database configurations. + + ``_ + + :param id: ID of the database configuration to create or update. + :param configuration: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index ebf72ef18..1768a07e1 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -2488,6 +2488,7 @@ def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: Deprecated. Use `include=definition` instead. :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -3169,9 +3174,11 @@ def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing.
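Stepping back to the new ingest helpers above, a sketch of the full configuration lifecycle; the database ID is hypothetical and the body shape is assumed to mirror the existing geoip database API (the ML parameter list resumes below):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Assumed configuration shape, mirroring put_geoip_database.
    client.ingest.put_ip_location_database(
        id="my-ip-db",
        configuration={"name": "GeoIP2-City", "maxmind": {"account_id": "1234567"}},
    )

    client.ingest.get_ip_location_database(id="my-ip-db")
    client.ingest.delete_ip_location_database(id="my-ip-db")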
If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. @@ -3473,6 +3488,8 @@ def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. 
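Interrupting the `analysis_limits` description briefly (it resumes after this sketch): the new `aggs` parameter on put_datafeed accepts the same mapping as `aggregations`. Job, datafeed, and field names below are hypothetical, and datafeed aggregations must still follow the usual date_histogram-based structure:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    client.ml.put_datafeed(
        datafeed_id="my-datafeed",
        job_id="my-job",
        indices=["my-metrics-*"],
        aggs={  # accepted interchangeably with `aggregations`
            "buckets": {
                "date_histogram": {"field": "@timestamp", "fixed_interval": "5m"},
                "aggregations": {
                    "@timestamp": {"max": {"field": "@timestamp"}},
                    "avg_cpu": {"avg": {"field": "system.cpu.total.pct"}},
                },
            }
        },
    )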
They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 5c8e36979..13e5254ef 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. 
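A usage sketch for the two repositories metering APIs (the description of the counter semantics continues below); the node selector and archive version are illustrative:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Read the current repositories metering counters from the local node.
    info = client.nodes.get_repositories_metering_info(node_id="_local")

    # Clear archived metering records up to a previously observed archive version.
    client.nodes.clear_repositories_metering_archive(
        node_id="_local", max_archive_version=42  # illustrative version
    )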
This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_
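Finally, a sketch of the keystore reload described above; the password is hypothetical and is needed only when the keystore is password protected:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Reload reloadable secure settings on all nodes (assumes the same keystore
    # password everywhere; requires encrypted inter-node communications).
    client.nodes.reload_secure_settings(secure_settings_password="keystore-pass")

    # Or reload on a single node by addressing it directly.
    client.nodes.reload_secure_settings(
        node_id="_local", secure_settings_password="keystore-pass"
    )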